Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig32
-rw-r--r--drivers/acpi/acpi_dbg.c4
-rw-r--r--drivers/acpi/acpi_lpss.c139
-rw-r--r--drivers/acpi/acpi_video.c14
-rw-r--r--drivers/acpi/acpica/acapps.h3
-rw-r--r--drivers/acpi/acpica/acdebug.h4
-rw-r--r--drivers/acpi/acpica/acglobal.h82
-rw-r--r--drivers/acpi/acpica/aclocal.h15
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h3
-rw-r--r--drivers/acpi/acpica/acutils.h23
-rw-r--r--drivers/acpi/acpica/dbexec.c110
-rw-r--r--drivers/acpi/acpica/dbfileio.c4
-rw-r--r--drivers/acpi/acpica/dbinput.c145
-rw-r--r--drivers/acpi/acpica/dscontrol.c18
-rw-r--r--drivers/acpi/acpica/dsfield.c28
-rw-r--r--drivers/acpi/acpica/dsobject.c4
-rw-r--r--drivers/acpi/acpica/dspkginit.c21
-rw-r--r--drivers/acpi/acpica/dsutils.c3
-rw-r--r--drivers/acpi/acpica/dswload.c6
-rw-r--r--drivers/acpi/acpica/dswload2.c13
-rw-r--r--drivers/acpi/acpica/evregion.c10
-rw-r--r--drivers/acpi/acpica/exdump.c11
-rw-r--r--drivers/acpi/acpica/hwtimer.c29
-rw-r--r--drivers/acpi/acpica/hwvalid.c14
-rw-r--r--drivers/acpi/acpica/nsaccess.c13
-rw-r--r--drivers/acpi/acpica/nsconvert.c3
-rw-r--r--drivers/acpi/acpica/nsnames.c149
-rw-r--r--drivers/acpi/acpica/nssearch.c1
-rw-r--r--drivers/acpi/acpica/nsxfeval.c9
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psobject.c10
-rw-r--r--drivers/acpi/acpica/psutils.c14
-rw-r--r--drivers/acpi/acpica/utdebug.c18
-rw-r--r--drivers/acpi/acpica/utdecode.c11
-rw-r--r--drivers/acpi/acpica/uterror.c73
-rw-r--r--drivers/acpi/acpica/utinit.c1
-rw-r--r--drivers/acpi/acpica/utmath.c4
-rw-r--r--drivers/acpi/acpica/utmutex.c9
-rw-r--r--drivers/acpi/acpica/utnonansi.c11
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c42
-rw-r--r--drivers/acpi/acpica/uttrack.c6
-rw-r--r--drivers/acpi/acpica/utxferror.c8
-rw-r--r--drivers/acpi/apei/ghes.c81
-rw-r--r--drivers/acpi/battery.c36
-rw-r--r--drivers/acpi/bus.c18
-rw-r--r--drivers/acpi/button.c22
-rw-r--r--drivers/acpi/device_pm.c27
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/evged.c47
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/numa.c3
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pmic/intel_pmic_bxtwc.c9
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c5
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtwc.c9
-rw-r--r--drivers/acpi/pmic/intel_pmic_crc.c7
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c7
-rw-r--r--drivers/acpi/processor_idle.c1
-rw-r--r--drivers/acpi/property.c8
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/sysfs.c26
-rw-r--r--drivers/acpi/utils.c41
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/Kconfig28
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c94
-rw-r--r--drivers/ata/ahci.h3
-rw-r--r--drivers/ata/ahci_brcm.c120
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/pata_at32.c400
-rw-r--r--drivers/ata/pata_atiixp.c4
-rw-r--r--drivers/ata/pata_it821x.c2
-rw-r--r--drivers/ata/pata_pdc2027x.c4
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/base/memory.c2
-rw-r--r--drivers/base/power/domain.c69
-rw-r--r--drivers/base/power/main.c415
-rw-r--r--drivers/base/power/power.h11
-rw-r--r--drivers/base/power/runtime.c84
-rw-r--r--drivers/base/power/sysfs.c182
-rw-r--r--drivers/base/power/wakeirq.c8
-rw-r--r--drivers/base/power/wakeup.c26
-rw-r--r--drivers/base/property.c7
-rw-r--r--drivers/base/regmap/Kconfig6
-rw-r--r--drivers/base/regmap/Makefile1
-rw-r--r--drivers/base/regmap/internal.h8
-rw-r--r--drivers/base/regmap/regcache-flat.c15
-rw-r--r--drivers/base/regmap/regmap-debugfs.c12
-rw-r--r--drivers/base/regmap/regmap-sdw.c88
-rw-r--r--drivers/base/regmap/regmap.c47
-rw-r--r--drivers/block/DAC960.c160
-rw-r--r--drivers/block/Kconfig4
-rw-r--r--drivers/block/aoe/aoe.h3
-rw-r--r--drivers/block/aoe/aoecmd.c48
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_main.c8
-rw-r--r--drivers/block/drbd/drbd_receiver.c3
-rw-r--r--drivers/block/null_blk.c290
-rw-r--r--drivers/block/pktcdvd.c12
-rw-r--r--drivers/block/smart1,2.h278
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/char/dsp56k.c2
-rw-r--r--drivers/char/dtlk.c6
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/hw_random/Kconfig45
-rw-r--r--drivers/char/hw_random/Makefile3
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c169
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c154
-rw-r--r--drivers/char/hw_random/core.c4
-rw-r--r--drivers/char/hw_random/exynos-trng.c235
-rw-r--r--drivers/char/hw_random/imx-rngc.c13
-rw-r--r--drivers/char/hw_random/mtk-rng.c1
-rw-r--r--drivers/char/hw_random/tpm-rng.c50
-rw-r--r--drivers/char/ipmi/bt-bmc.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c4
-rw-r--r--drivers/char/ipmi/ipmi_dmi.c5
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c2
-rw-r--r--drivers/char/ipmi/ipmi_powernv.c6
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c13
-rw-r--r--drivers/char/ipmi/ipmi_si_platform.c2
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c3
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c8
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c4
-rw-r--r--drivers/char/ppdev.c4
-rw-r--r--drivers/char/random.c28
-rw-r--r--drivers/char/rtc.c4
-rw-r--r--drivers/char/snsc.c4
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/char/tpm/Kconfig11
-rw-r--r--drivers/char/tpm/Makefile5
-rw-r--r--drivers/char/tpm/tpm-chip.c67
-rw-r--r--drivers/char/tpm/tpm-interface.c231
-rw-r--r--drivers/char/tpm/tpm.h52
-rw-r--r--drivers/char/tpm/tpm1_eventlog.c13
-rw-r--r--drivers/char/tpm/tpm2-cmd.c12
-rw-r--r--drivers/char/tpm/tpm2_eventlog.c2
-rw-r--r--drivers/char/tpm/tpm_eventlog.h138
-rw-r--r--drivers/char/tpm/tpm_eventlog_acpi.c (renamed from drivers/char/tpm/tpm_acpi.c)4
-rw-r--r--drivers/char/tpm/tpm_eventlog_efi.c66
-rw-r--r--drivers/char/tpm/tpm_eventlog_of.c (renamed from drivers/char/tpm/tpm_of.c)6
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c27
-rw-r--r--drivers/char/tpm/tpm_tis.c108
-rw-r--r--drivers/char/tpm/tpm_tis_core.c193
-rw-r--r--drivers/char/tpm/tpm_tis_core.h16
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c4
-rw-r--r--drivers/char/tpm/xen-tpmfront.c61
-rw-r--r--drivers/char/virtio_console.c4
-rw-r--r--drivers/char/xillybus/xillybus_core.c4
-rw-r--r--drivers/clocksource/Kconfig8
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/owl-timer.c5
-rw-r--r--drivers/clocksource/tcb_clksrc.c2
-rw-r--r--drivers/clocksource/timer-of.c84
-rw-r--r--drivers/clocksource/timer-of.h1
-rw-r--r--drivers/clocksource/timer-sprd.c159
-rw-r--r--drivers/clocksource/timer-stm32.c358
-rw-r--r--drivers/cpufreq/Kconfig.arm88
-rw-r--r--drivers/cpufreq/Makefile9
-rw-r--r--drivers/cpufreq/arm_big_little.c23
-rw-r--r--drivers/cpufreq/armada-37xx-cpufreq.c241
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c8
-rw-r--r--drivers/cpufreq/cpufreq-dt.c27
-rw-r--r--drivers/cpufreq/cpufreq.c55
-rw-r--r--drivers/cpufreq/cpufreq_stats.c3
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c171
-rw-r--r--drivers/cpufreq/intel_pstate.c14
-rw-r--r--drivers/cpufreq/longhaul.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c23
-rw-r--r--drivers/cpufreq/mvebu-cpufreq.c16
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c143
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c14
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c193
-rw-r--r--drivers/cpufreq/ti-cpufreq.c51
-rw-r--r--drivers/cpuidle/governor.c5
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c6
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c131
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h4
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h4
-rw-r--r--drivers/crypto/amcc/crypto4xx_trng.c2
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c8
-rw-r--r--drivers/crypto/bcm/cipher.c1
-rw-r--r--drivers/crypto/bfin_crc.c3
-rw-r--r--drivers/crypto/caam/caamalg.c120
-rw-r--r--drivers/crypto/caam/caamalg_desc.c182
-rw-r--r--drivers/crypto/caam/caamalg_desc.h10
-rw-r--r--drivers/crypto/caam/caamalg_qi.c68
-rw-r--r--drivers/crypto/caam/caamhash.c73
-rw-r--r--drivers/crypto/caam/ctrl.c4
-rw-r--r--drivers/crypto/caam/desc.h29
-rw-r--r--drivers/crypto/caam/desc_constr.h51
-rw-r--r--drivers/crypto/caam/intern.h1
-rw-r--r--drivers/crypto/caam/key_gen.c30
-rw-r--r--drivers/crypto/caam/key_gen.h30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c1
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-galois.c1
-rw-r--r--drivers/crypto/chelsio/Kconfig10
-rw-r--r--drivers/crypto/chelsio/Makefile1
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c540
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h15
-rw-r--r--drivers/crypto/chelsio/chcr_core.c14
-rw-r--r--drivers/crypto/chelsio/chcr_core.h38
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h76
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c654
-rw-r--r--drivers/crypto/exynos-rng.c108
-rw-r--r--drivers/crypto/hifn_795x.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel.c370
-rw-r--r--drivers/crypto/inside-secure/safexcel.h173
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c53
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c125
-rw-r--r--drivers/crypto/ixp4xx_crypto.c7
-rw-r--r--drivers/crypto/marvell/cesa.c20
-rw-r--r--drivers/crypto/nx/nx-842-powernv.c4
-rw-r--r--drivers/crypto/picoxcell_crypto.c27
-rw-r--r--drivers/crypto/qat/qat_common/qat_hal.c133
-rw-r--r--drivers/crypto/s5p-sss.c26
-rw-r--r--drivers/crypto/stm32/Kconfig13
-rw-r--r--drivers/crypto/stm32/Makefile5
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c1170
-rw-r--r--drivers/crypto/stm32/stm32_crc32.c2
-rw-r--r--drivers/devfreq/devfreq.c5
-rw-r--r--drivers/dma-buf/dma-buf.c6
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/dma/amba-pl08x.c11
-rw-r--r--drivers/dma/bcm2835-dma.c10
-rw-r--r--drivers/dma/cppi41.c2
-rw-r--r--drivers/dma/dma-jz4780.c10
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/edma.c7
-rw-r--r--drivers/dma/img-mdc-dma.c17
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/ioat/dma.c2
-rw-r--r--drivers/dma/k3dma.c10
-rw-r--r--drivers/dma/mic_x100_dma.c4
-rw-r--r--drivers/dma/omap-dma.c2
-rw-r--r--drivers/dma/qcom/hidma.c41
-rw-r--r--drivers/dma/qcom/hidma_ll.c9
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c61
-rw-r--r--drivers/dma/s3c24xx-dma.c11
-rw-r--r--drivers/dma/sh/rcar-dmac.c68
-rw-r--r--drivers/dma/sprd-dma.c2
-rw-r--r--drivers/dma/stm32-dmamux.c3
-rw-r--r--drivers/dma/tegra20-apb-dma.c19
-rw-r--r--drivers/dma/ti-dma-crossbar.c10
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/dma/virt-dma.c5
-rw-r--r--drivers/dma/virt-dma.h44
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c302
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c179
-rw-r--r--drivers/edac/Kconfig7
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/mv64x60_edac.c2
-rw-r--r--drivers/edac/octeon_edac-lmc.c1
-rw-r--r--drivers/edac/ti_edac.c341
-rw-r--r--drivers/extcon/extcon-axp288.c39
-rw-r--r--drivers/extcon/extcon-usbc-cros-ec.c142
-rw-r--r--drivers/firewire/core-cdev.c4
-rw-r--r--drivers/firewire/nosy.c4
-rw-r--r--drivers/firmware/Kconfig8
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_sdei.c1092
-rw-r--r--drivers/firmware/efi/Kconfig10
-rw-r--r--drivers/firmware/efi/Makefile3
-rw-r--r--drivers/firmware/efi/capsule-loader.c2
-rw-r--r--drivers/firmware/efi/cper-arm.c356
-rw-r--r--drivers/firmware/efi/cper.c122
-rw-r--r--drivers/firmware/efi/efi.c6
-rw-r--r--drivers/firmware/efi/libstub/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/tpm.c81
-rw-r--r--drivers/firmware/efi/tpm.c40
-rw-r--r--drivers/firmware/psci.c2
-rw-r--r--drivers/firmware/psci_checker.c46
-rw-r--r--drivers/gpio/Kconfig32
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/devres.c42
-rw-r--r--drivers/gpio/gpio-74x164.c5
-rw-r--r--drivers/gpio/gpio-adp5520.c3
-rw-r--r--drivers/gpio/gpio-adp5588.c2
-rw-r--r--drivers/gpio/gpio-altera.c3
-rw-r--r--drivers/gpio/gpio-amd8111.c2
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-aspeed.c41
-rw-r--r--drivers/gpio/gpio-ath79.c3
-rw-r--r--drivers/gpio/gpio-axp209.c188
-rw-r--r--drivers/gpio/gpio-bcm-kona.c8
-rw-r--r--drivers/gpio/gpio-brcmstb.c1
-rw-r--r--drivers/gpio/gpio-bt8xx.c2
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-cs5535.c2
-rw-r--r--drivers/gpio/gpio-da9052.c2
-rw-r--r--drivers/gpio/gpio-da9055.c2
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-ftgpio010.c4
-rw-r--r--drivers/gpio/gpio-iop.c4
-rw-r--r--drivers/gpio/gpio-it87.c2
-rw-r--r--drivers/gpio/gpio-max732x.c6
-rw-r--r--drivers/gpio/gpio-merrifield.c11
-rw-r--r--drivers/gpio/gpio-mockup.c288
-rw-r--r--drivers/gpio/gpio-omap.c39
-rw-r--r--drivers/gpio/gpio-pcie-idio-24.c447
-rw-r--r--drivers/gpio/gpio-stmpe.c54
-rw-r--r--drivers/gpio/gpio-thunderx.c4
-rw-r--r--drivers/gpio/gpio-winbond.c732
-rw-r--r--drivers/gpio/gpiolib-acpi.c57
-rw-r--r--drivers/gpio/gpiolib-of.c119
-rw-r--r--drivers/gpio/gpiolib-sysfs.c33
-rw-r--r--drivers/gpio/gpiolib.c334
-rw-r--r--drivers/gpio/gpiolib.h18
-rw-r--r--drivers/gpu/drm/drm_file.c4
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c8
-rw-r--r--drivers/gpu/drm/r128/r128_state.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hid/Kconfig12
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-asus.c41
-rw-r--r--drivers/hid/hid-core.c932
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-elecom.c78
-rw-r--r--drivers/hid/hid-elo.c6
-rw-r--r--drivers/hid/hid-generic.c68
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-jabra.c58
-rw-r--r--drivers/hid/hid-multitouch.c137
-rw-r--r--drivers/hid/hid-quirks.c1276
-rw-r--r--drivers/hid/hid-rmi.c1
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c2
-rw-r--r--drivers/hid/hid-roccat.c2
-rw-r--r--drivers/hid/hid-sensor-custom.c4
-rw-r--r--drivers/hid/hid-sony.c93
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c18
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/uhid.c2
-rw-r--r--drivers/hid/usbhid/Makefile2
-rw-r--r--drivers/hid/usbhid/hid-core.c11
-rw-r--r--drivers/hid/usbhid/hid-quirks.c402
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hid/wacom_sys.c134
-rw-r--r--drivers/hid/wacom_wac.c67
-rw-r--r--drivers/hid/wacom_wac.h6
-rw-r--r--drivers/hsi/clients/cmt_speech.c8
-rw-r--r--drivers/hv/hv_utils_transport.c2
-rw-r--r--drivers/hwmon/Kconfig26
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c22
-rw-r--r--drivers/hwmon/coretemp.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c54
-rw-r--r--drivers/hwmon/hih6130.c2
-rw-r--r--drivers/hwmon/hwmon.c12
-rw-r--r--drivers/hwmon/iio_hwmon.c3
-rw-r--r--drivers/hwmon/ina2xx.c90
-rw-r--r--drivers/hwmon/k10temp.c1
-rw-r--r--drivers/hwmon/lm75.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig1
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c288
-rw-r--r--drivers/hwmon/pmbus/ir35221.c189
-rw-r--r--drivers/hwmon/pmbus/lm25066.c67
-rw-r--r--drivers/hwmon/pmbus/max31785.c294
-rw-r--r--drivers/hwmon/pmbus/pmbus.h41
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c273
-rw-r--r--drivers/hwmon/sht15.c1
-rw-r--r--drivers/hwmon/sht21.c2
-rw-r--r--drivers/hwmon/sht3x.c7
-rw-r--r--drivers/hwmon/w83773g.c329
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c15
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c39
-rw-r--r--drivers/iio/adc/Kconfig37
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/sd_adc_modulator.c68
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c1205
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c302
-rw-r--r--drivers/iio/adc/stm32-dfsdm.h310
-rw-r--r--drivers/iio/buffer/Kconfig10
-rw-r--r--drivers/iio/buffer/Makefile1
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c11
-rw-r--r--drivers/iio/buffer/industrialio-hw-consumer.c247
-rw-r--r--drivers/iio/iio_core.h2
-rw-r--r--drivers/iio/industrialio-buffer.c2
-rw-r--r--drivers/iio/industrialio-event.c4
-rw-r--r--drivers/iio/inkern.c17
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/Makefile2
-rw-r--r--drivers/infiniband/core/addr.c65
-rw-r--r--drivers/infiniband/core/cache.c23
-rw-r--r--drivers/infiniband/core/cm.c227
-rw-r--r--drivers/infiniband/core/cma.c252
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/core_priv.h52
-rw-r--r--drivers/infiniband/core/cq.c39
-rw-r--r--drivers/infiniband/core/device.c42
-rw-r--r--drivers/infiniband/core/fmr_pool.c12
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/mad.c1
-rw-r--r--drivers/infiniband/core/netlink.c10
-rw-r--r--drivers/infiniband/core/nldev.c394
-rw-r--r--drivers/infiniband/core/restrack.c164
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c13
-rw-r--r--drivers/infiniband/core/sa_query.c18
-rw-r--r--drivers/infiniband/core/security.c10
-rw-r--r--drivers/infiniband/core/sysfs.c1
-rw-r--r--drivers/infiniband/core/ucm.c77
-rw-r--r--drivers/infiniband/core/ucma.c23
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/user_mad.c127
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c14
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c19
-rw-r--r--drivers/infiniband/core/uverbs_main.c103
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c3
-rw-r--r--drivers/infiniband/core/verbs.c312
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h43
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c145
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h39
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c404
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h20
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c251
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c463
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h78
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c141
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h91
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h127
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c36
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c87
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h2
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c16
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c18
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c64
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h25
-rw-r--r--drivers/infiniband/hw/hfi1/init.c2
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c6
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c10
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c12
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c1
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c1
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c2
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c6
-rw-r--r--drivers/infiniband/hw/hns/Makefile2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h103
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c759
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.h134
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c758
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h44
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c1837
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h283
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c16
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c72
-rw-r--r--drivers/infiniband/hw/i40iw/Kconfig1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c68
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c25
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c13
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c18
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c50
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c5
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c19
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c20
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c83
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c23
-rw-r--r--drivers/infiniband/hw/mlx5/main.c1353
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h111
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c9
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c432
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_user.h112
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c19
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c8
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c10
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c15
-rw-r--r--drivers/infiniband/hw/qib/qib.h8
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c82
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c235
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c13
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c21
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c15
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c11
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c15
-rw-r--r--drivers/infiniband/sw/rdmavt/mcast.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c28
-rw-r--r--drivers/infiniband/sw/rdmavt/srq.c16
-rw-r--r--drivers/infiniband/sw/rdmavt/trace.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_qp.h42
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c10
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.h6
-rw-r--r--drivers/infiniband/sw/rxe/Kconfig4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c98
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c16
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c7
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c795
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h43
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c962
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h100
-rw-r--r--drivers/input/evdev.c4
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joydev.c2
-rw-r--r--drivers/input/misc/hp_sdc_rtc.c2
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/mousedev.c4
-rw-r--r--drivers/input/serio/serio_raw.c4
-rw-r--r--drivers/input/serio/userio.c2
-rw-r--r--drivers/iommu/intel-iommu.c2
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm2836.c46
-rw-r--r--drivers/irqchip/irq-gic-v3.c40
-rw-r--r--drivers/irqchip/irq-goldfish-pic.c139
-rw-r--r--drivers/irqchip/irq-ompic.c4
-rw-r--r--drivers/isdn/capi/capi.c4
-rw-r--r--drivers/isdn/divert/divert_procfs.c4
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c4
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c4
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c2
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c2
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c4
-rw-r--r--drivers/isdn/i4l/isdn_common.c4
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c4
-rw-r--r--drivers/isdn/i4l/isdn_ppp.h2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c22
-rw-r--r--drivers/isdn/mISDN/timerdev.c4
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-as3645a.c7
-rw-r--r--drivers/leds/leds-blinkm.c4
-rw-r--r--drivers/leds/leds-lm3692x.c393
-rw-r--r--drivers/leds/leds-lp8860.c66
-rw-r--r--drivers/leds/leds-pm8058.c2
-rw-r--r--drivers/leds/leds-pwm.c1
-rw-r--r--drivers/leds/trigger/Kconfig9
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c496
-rw-r--r--drivers/leds/trigger/ledtrig-transient.c33
-rw-r--r--drivers/leds/uleds.c2
-rw-r--r--drivers/lightnvm/Kconfig7
-rw-r--r--drivers/lightnvm/Makefile1
-rw-r--r--drivers/lightnvm/core.c462
-rw-r--r--drivers/lightnvm/pblk-cache.c5
-rw-r--r--drivers/lightnvm/pblk-core.c55
-rw-r--r--drivers/lightnvm/pblk-gc.c23
-rw-r--r--drivers/lightnvm/pblk-init.c104
-rw-r--r--drivers/lightnvm/pblk-map.c2
-rw-r--r--drivers/lightnvm/pblk-rb.c111
-rw-r--r--drivers/lightnvm/pblk-read.c35
-rw-r--r--drivers/lightnvm/pblk-recovery.c43
-rw-r--r--drivers/lightnvm/pblk-rl.c54
-rw-r--r--drivers/lightnvm/pblk-sysfs.c15
-rw-r--r--drivers/lightnvm/pblk-write.c23
-rw-r--r--drivers/lightnvm/pblk.h163
-rw-r--r--drivers/lightnvm/rrpc.c1625
-rw-r--r--drivers/lightnvm/rrpc.h290
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/via-pmu.c4
-rw-r--r--drivers/mailbox/mailbox-test.c2
-rw-r--r--drivers/md/Kconfig7
-rw-r--r--drivers/md/Makefile1
-rw-r--r--drivers/md/bcache/alloc.c19
-rw-r--r--drivers/md/bcache/bcache.h24
-rw-r--r--drivers/md/bcache/btree.c10
-rw-r--r--drivers/md/bcache/closure.c47
-rw-r--r--drivers/md/bcache/closure.h60
-rw-r--r--drivers/md/bcache/debug.c7
-rw-r--r--drivers/md/bcache/io.c13
-rw-r--r--drivers/md/bcache/movinggc.c2
-rw-r--r--drivers/md/bcache/request.c29
-rw-r--r--drivers/md/bcache/super.c27
-rw-r--r--drivers/md/bcache/util.c34
-rw-r--r--drivers/md/bcache/util.h1
-rw-r--r--drivers/md/bcache/writeback.c203
-rw-r--r--drivers/md/bcache/writeback.h12
-rw-r--r--drivers/md/dm-bufio.c37
-rw-r--r--drivers/md/dm-core.h5
-rw-r--r--drivers/md/dm-crypt.c6
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-flakey.c5
-rw-r--r--drivers/md/dm-io.c3
-rw-r--r--drivers/md/dm-ioctl.c4
-rw-r--r--drivers/md/dm-kcopyd.c6
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-mpath.c316
-rw-r--r--drivers/md/dm-queue-length.c6
-rw-r--r--drivers/md/dm-raid.c389
-rw-r--r--drivers/md/dm-rq.c34
-rw-r--r--drivers/md/dm-service-time.c6
-rw-r--r--drivers/md/dm-snap.c84
-rw-r--r--drivers/md/dm-stats.c1
-rw-r--r--drivers/md/dm-table.c114
-rw-r--r--drivers/md/dm-thin.c9
-rw-r--r--drivers/md/dm-unstripe.c219
-rw-r--r--drivers/md/dm-zoned-metadata.c3
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c680
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/md.c35
-rw-r--r--drivers/md/md.h9
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c12
-rw-r--r--drivers/md/raid5-cache.c31
-rw-r--r--drivers/md/raid5-log.h30
-rw-r--r--drivers/md/raid5-ppl.c167
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/cec/cec-api.c4
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c8
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c7
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c4
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c2
-rw-r--r--drivers/media/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/media-devnode.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c12
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c8
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c10
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h4
-rw-r--r--drivers/media/pci/meye/meye.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c4
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c8
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c4
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c4
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c4
-rw-r--r--drivers/media/platform/fsl-viu.c6
-rw-r--r--drivers/media/platform/m2m-deinterlace.c4
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c4
-rw-r--r--drivers/media/platform/sh_veu.c2
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c4
-rw-r--r--drivers/media/platform/via-camera.c2
-rw-r--r--drivers/media/platform/vivid/vivid-core.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.h2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.h2
-rw-r--r--drivers/media/radio/radio-cadet.c6
-rw-r--r--drivers/media/radio/radio-si476x.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c6
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c6
-rw-r--r--drivers/media/usb/gspca/gspca.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c10
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c4
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c57
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c10
-rw-r--r--drivers/memory/omap-gpmc.c163
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c2
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c2
-rw-r--r--drivers/message/fusion/mptbase.c59
-rw-r--r--drivers/message/fusion/mptctl.c25
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/Kconfig41
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/ab8500-debugfs.c439
-rw-r--r--drivers/mfd/atmel-flexcom.c63
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/cros_ec.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c (renamed from drivers/platform/chrome/cros_ec_dev.c)8
-rw-r--r--drivers/mfd/cros_ec_dev.h (renamed from drivers/platform/chrome/cros_ec_dev.h)0
-rw-r--r--drivers/mfd/cros_ec_spi.c25
-rw-r--r--drivers/mfd/intel-lpss.c6
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c1
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/lpc_ich.c5
-rw-r--r--drivers/mfd/max77843.c1
-rw-r--r--drivers/mfd/palmas.c10
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/rave-sp.c710
-rw-r--r--drivers/mfd/stm32-lptimer.c6
-rw-r--r--drivers/mfd/stm32-timers.c4
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/tmio_core.c20
-rw-r--r--drivers/misc/Kconfig5
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/Kconfig20
-rw-r--r--drivers/misc/cardreader/Makefile4
-rw-r--r--drivers/misc/cardreader/rtl8411.c (renamed from drivers/mfd/rtl8411.c)2
-rw-r--r--drivers/misc/cardreader/rts5209.c (renamed from drivers/mfd/rts5209.c)2
-rw-r--r--drivers/misc/cardreader/rts5227.c (renamed from drivers/mfd/rts5227.c)2
-rw-r--r--drivers/misc/cardreader/rts5229.c (renamed from drivers/mfd/rts5229.c)2
-rw-r--r--drivers/misc/cardreader/rts5249.c (renamed from drivers/mfd/rts5249.c)3
-rw-r--r--drivers/misc/cardreader/rts5260.c748
-rw-r--r--drivers/misc/cardreader/rts5260.h45
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c (renamed from drivers/mfd/rtsx_pcr.c)125
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h (renamed from drivers/mfd/rtsx_pcr.h)12
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c (renamed from drivers/mfd/rtsx_usb.c)2
-rw-r--r--drivers/misc/cxl/api.c2
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/file.c4
-rw-r--r--drivers/misc/cxl/vphb.c2
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/mei/main.c6
-rw-r--r--drivers/misc/mic/scif/scif_api.c7
-rw-r--r--drivers/misc/mic/scif/scif_epd.h2
-rw-r--r--drivers/misc/mic/scif/scif_fd.c2
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c4
-rw-r--r--drivers/misc/phantom.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c4
-rw-r--r--drivers/mmc/core/block.c1385
-rw-r--r--drivers/mmc/core/block.h12
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/core.c228
-rw-r--r--drivers/mmc/core/core.h47
-rw-r--r--drivers/mmc/core/host.h6
-rw-r--r--drivers/mmc/core/mmc_test.c221
-rw-r--r--drivers/mmc/core/queue.c504
-rw-r--r--drivers/mmc/core/queue.h64
-rw-r--r--drivers/mmc/core/slot-gpio.c22
-rw-r--r--drivers/mmc/host/Kconfig26
-rw-r--r--drivers/mmc/host/Makefile11
-rw-r--r--drivers/mmc/host/android-goldfish.c2
-rw-r--r--drivers/mmc/host/cqhci.c1150
-rw-r--r--drivers/mmc/host/cqhci.h240
-rw-r--r--drivers/mmc/host/davinci_mmc.c17
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c2
-rw-r--r--drivers/mmc/host/mmci.c127
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/renesas_sdhi.h22
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c49
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c15
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c37
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c132
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c22
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c149
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sdhci-pci-arasan.c331
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c218
-rw-r--r--drivers/mmc/host/sdhci-pci.h7
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci-xenon.c7
-rw-r--r--drivers/mmc/host/sdhci.c57
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c52
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.c23
-rw-r--r--drivers/mmc/host/tmio_mmc.h43
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c152
-rw-r--r--drivers/mtd/devices/docg3.c70
-rw-r--r--drivers/mtd/devices/m25p80.c9
-rw-r--r--drivers/mtd/devices/mchp23k256.c18
-rw-r--r--drivers/mtd/mtdcore.c36
-rw-r--r--drivers/mtd/mtdpart.c43
-rw-r--r--drivers/mtd/mtdswap.c5
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c6
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c38
-rw-r--r--drivers/mtd/nand/cafe_nand.c52
-rw-r--r--drivers/mtd/nand/denali.c84
-rw-r--r--drivers/mtd/nand/denali.h4
-rw-r--r--drivers/mtd/nand/denali_pci.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/docg4.c21
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c10
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c13
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c111
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h46
-rw-r--r--drivers/mtd/nand/hisi504_nand.c9
-rw-r--r--drivers/mtd/nand/jz4740_nand.c16
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c7
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c33
-rw-r--r--drivers/mtd/nand/marvell_nand.c2896
-rw-r--r--drivers/mtd/nand/mtk_ecc.c126
-rw-r--r--drivers/mtd/nand/mtk_ecc.h3
-rw-r--r--drivers/mtd/nand/mtk_nand.c76
-rw-r--r--drivers/mtd/nand/nand_base.c2309
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_hynix.c129
-rw-r--r--drivers/mtd/nand/nand_micron.c83
-rw-r--r--drivers/mtd/nand/nand_samsung.c19
-rw-r--r--drivers/mtd/nand/nand_timings.c21
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c14
-rw-r--r--drivers/mtd/nand/qcom_nandc.c31
-rw-r--r--drivers/mtd/nand/r852.c11
-rw-r--r--drivers/mtd/nand/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/sm_common.h2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c111
-rw-r--r--drivers/mtd/nand/tango_nand.c27
-rw-r--r--drivers/mtd/nand/tmio_nand.c5
-rw-r--r--drivers/mtd/nand/vf610_nfc.c6
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/omap2.c577
-rw-r--r--drivers/mtd/onenand/onenand_base.c81
-rw-r--r--drivers/mtd/onenand/samsung.c185
-rw-r--r--drivers/mtd/parsers/sharpslpart.c6
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c55
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c8
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c6
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c240
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c75
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c21
-rw-r--r--drivers/mtd/ubi/block.c42
-rw-r--r--drivers/mtd/ubi/build.c13
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c5
-rw-r--r--drivers/mtd/ubi/vmt.c15
-rw-r--r--drivers/mtd/ubi/wl.c87
-rw-r--r--drivers/mtd/ubi/wl.h2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c102
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h7
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c91
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c4
-rw-r--r--drivers/net/ppp/ppp_async.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/tap.c4
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/nubus/Makefile2
-rw-r--r--drivers/nubus/bus.c117
-rw-r--r--drivers/nubus/nubus.c542
-rw-r--r--drivers/nubus/proc.c281
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/core.c134
-rw-r--r--drivers/nvme/host/fabrics.c22
-rw-r--r--drivers/nvme/host/fabrics.h2
-rw-r--r--drivers/nvme/host/fc.c7
-rw-r--r--drivers/nvme/host/lightnvm.c185
-rw-r--r--drivers/nvme/host/multipath.c44
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c216
-rw-r--r--drivers/nvme/host/rdma.c6
-rw-r--r--drivers/nvme/host/trace.c130
-rw-r--r--drivers/nvme/host/trace.h165
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/core.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/fc.c60
-rw-r--r--drivers/nvme/target/fcloop.c244
-rw-r--r--drivers/nvme/target/loop.c3
-rw-r--r--drivers/nvme/target/rdma.c83
-rw-r--r--drivers/of/base.c26
-rw-r--r--drivers/of/property.c8
-rw-r--r--drivers/opp/Makefile1
-rw-r--r--drivers/opp/ti-opp-supply.c425
-rw-r--r--drivers/pci/pci-driver.c21
-rw-r--r--drivers/pci/pcie/portdrv_pci.c3
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm_dsu_pmu.c843
-rw-r--r--drivers/perf/arm_pmu_platform.c15
-rw-r--r--drivers/perf/arm_spe_pmu.c9
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c31
-rw-r--r--drivers/pinctrl/Kconfig10
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c476
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c9
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.h27
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c1
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c4
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/surfacepro3_button.c4
-rw-r--r--drivers/pnp/pnpbios/core.c5
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/power/avs/rockchip-io-domain.c24
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/reset/imx-snvs-poweroff.c66
-rw-r--r--drivers/power/reset/msm-poweroff.c7
-rw-r--r--drivers/power/reset/zx-reboot.c4
-rw-r--r--drivers/power/supply/ab8500_charger.c6
-rw-r--r--drivers/power/supply/axp20x_ac_power.c8
-rw-r--r--drivers/power/supply/axp288_charger.c290
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c203
-rw-r--r--drivers/power/supply/bq24190_charger.c126
-rw-r--r--drivers/power/supply/bq27xxx_battery.c39
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/charger-manager.c2
-rw-r--r--drivers/power/supply/cpcap-battery.c4
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c25
-rw-r--r--drivers/power/supply/max17042_battery.c54
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/powercap/intel_rapl.c99
-rw-r--r--drivers/powercap/powercap_sys.c6
-rw-r--r--drivers/pps/pps.c2
-rw-r--r--drivers/ptp/ptp_chardev.c2
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c383
-rw-r--r--drivers/regulator/internal.h27
-rw-r--r--drivers/regulator/of_regulator.c34
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c84
-rw-r--r--drivers/regulator/sc2731-regulator.c256
-rw-r--r--drivers/regulator/tps65218-regulator.c5
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/rpmsg_char.c4
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/scsi/3w-9xxx.c26
-rw-r--r--drivers/scsi/3w-9xxx.h2
-rw-r--r--drivers/scsi/3w-sas.c15
-rw-r--r--drivers/scsi/aacraid/aachba.c473
-rw-r--r--drivers/scsi/aacraid/aacraid.h54
-rw-r--r--drivers/scsi/aacraid/commctrl.c6
-rw-r--r--drivers/scsi/aacraid/comminit.c49
-rw-r--r--drivers/scsi/aacraid/commsup.c220
-rw-r--r--drivers/scsi/aacraid/linit.c26
-rw-r--r--drivers/scsi/aacraid/sa.c32
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h567
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1318
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/bfa/bfa_core.c2
-rw-r--r--drivers/scsi/bfa/bfa_cs.h6
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h3
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c8
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c3
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h4
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c78
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c62
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c10
-rw-r--r--drivers/scsi/bfa/bfa_port.c15
-rw-r--r--drivers/scsi/bfa/bfa_port.h2
-rw-r--r--drivers/scsi/bfa/bfa_svc.c51
-rw-r--r--drivers/scsi/bfa/bfa_svc.h2
-rw-r--r--drivers/scsi/bfa/bfad.c23
-rw-r--r--drivers/scsi/bfa/bfad_attr.c5
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c10
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c8
-rw-r--r--drivers/scsi/bfa/bfad_im.h32
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c60
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c19
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.h1
-rw-r--r--drivers/scsi/csiostor/csio_mb.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/cxlflash/Makefile2
-rw-r--r--drivers/scsi/cxlflash/backend.h41
-rw-r--r--drivers/scsi/cxlflash/common.h8
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c168
-rw-r--r--drivers/scsi/cxlflash/main.c100
-rw-r--r--drivers/scsi/cxlflash/superpipe.c64
-rw-r--r--drivers/scsi/cxlflash/superpipe.h4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c37
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c22
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/fnic/fnic_stats.h4
-rw-r--r--drivers/scsi/fnic/fnic_trace.c58
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h46
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c267
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c194
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c332
-rw-r--r--drivers/scsi/hosts.c6
-rw-r--r--drivers/scsi/hpsa.c18
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c34
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c320
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libiscsi.c28
-rw-r--r--drivers/scsi/libiscsi_tcp.c9
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_discover.c34
-rw-r--r--drivers/scsi/libsas/sas_event.c86
-rw-r--r--drivers/scsi/libsas/sas_expander.c12
-rw-r--r--drivers/scsi/libsas/sas_init.c107
-rw-r--r--drivers/scsi/libsas/sas_internal.h7
-rw-r--r--drivers/scsi/libsas/sas_phy.c69
-rw-r--r--drivers/scsi/libsas/sas_port.c25
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c20
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c59
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c294
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c356
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c193
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h37
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c175
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c99
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c207
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h29
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c332
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c33
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/ppa.c4
-rw-r--r--drivers/scsi/qedf/qedf_main.c3
-rw-r--r--drivers/scsi/qedi/qedi_fw.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h173
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c1497
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1044
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c65
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c151
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c528
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c761
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c21
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_common.c14
-rw-r--r--drivers/scsi/scsi_debug.c724
-rw-r--r--drivers/scsi/scsi_devinfo.c39
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c50
-rw-r--r--drivers/scsi/scsi_priv.h15
-rw-r--r--drivers/scsi/scsi_sysfs.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c16
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/ses.c11
-rw-r--r--drivers/scsi/sg.c6
-rw-r--r--drivers/scsi/smartpqi/Makefile2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/ufs/ufshci.h16
-rw-r--r--drivers/scsi/wd719x.c4
-rw-r--r--drivers/spi/spi-armada-3700.c110
-rw-r--r--drivers/spi/spi-atmel.c113
-rw-r--r--drivers/spi/spi-bcm53xx.c26
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c9
-rw-r--r--drivers/spi/spi-imx.c26
-rw-r--r--drivers/spi/spi-jcore.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c1
-rw-r--r--drivers/spi/spi-orion.c21
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c18
-rw-r--r--drivers/spi/spi-sh-msiof.c128
-rw-r--r--drivers/spi/spi-sirf.c4
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-xilinx.c1
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c2
-rw-r--r--drivers/staging/irda/net/af_irda.c4
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.c8
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c4
-rw-r--r--drivers/staging/most/aim-v4l2/video.c4
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c5
-rw-r--r--drivers/staging/speakup/speakup_soft.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c8
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/target_core_transport.c46
-rw-r--r--drivers/thermal/cpu_cooling.c201
-rw-r--r--drivers/tty/n_gsm.c4
-rw-r--r--drivers/tty/n_hdlc.c6
-rw-r--r--drivers/tty/n_r3964.c6
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/serdev/core.c31
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/vt/vc_screen.c4
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/class/usblp.c4
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c30
-rw-r--r--drivers/usb/gadget/function/f_printer.c4
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/ldusb.c4
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/mon/mon_bin.c4
-rw-r--r--drivers/vfio/virqfd.c4
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c19
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/fbdev/macfb.c10
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/w1/masters/w1-gpio.c149
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/rave-sp-wdt.c337
-rw-r--r--drivers/xen/evtchn.c4
-rw-r--r--drivers/xen/mcelog.c2
-rw-r--r--drivers/xen/pvcalls-front.c10
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
1246 files changed, 58119 insertions, 25127 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 46505396869e..d650c5b6ec90 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -361,22 +361,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
-config X86_PM_TIMER
- bool "Power Management Timer Support" if EXPERT
- depends on X86
- default y
- help
- The Power Management Timer is available on all ACPI-capable chipsets,
- in most cases even if ACPI is unusable or blacklisted.
-
- This timing source is not affected by power management features
- like aggressive processor idling, throttling, frequency and/or
- voltage scaling, unlike the commonly used Time Stamp Counter
- (TSC) timing source.
-
- You should nearly always say Y here because many modern
- systems require this timer.
-
config ACPI_CONTAINER
bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
@@ -564,3 +548,19 @@ config TPS68470_PMIC_OPREGION
using this, are probed.
endif # ACPI
+
+config X86_PM_TIMER
+ bool "Power Management Timer Support" if EXPERT
+ depends on X86 && (ACPI || JAILHOUSE_GUEST)
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable chipsets,
+ in most cases even if ACPI is unusable or blacklisted.
+
+ This timing source is not affected by power management features
+ like aggressive processor idling, throttling, frequency and/or
+ voltage scaling, unlike the commonly used Time Stamp Counter
+ (TSC) timing source.
+
+ You should nearly always say Y here because many modern
+ systems require this timer.
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 3ec05aa1a903..2ff5c8c04e3b 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -718,9 +718,9 @@ again:
return size > 0 ? size : ret;
}
-static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
+static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
- int masks = 0;
+ __poll_t masks = 0;
poll_wait(file, &acpi_aml_io.wait, wait);
if (acpi_aml_user_readable())
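The hunk above is part of the tree-wide switch of poll handlers from unsigned int to the __poll_t event bitmask. A minimal sketch of the resulting pattern for a character-device handler follows; the wait queue and readiness helper (my_dev_waitq, my_dev_readable()) are hypothetical stand-ins for illustration, not part of this driver:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

/* Hypothetical wait queue and readiness check, for illustration only. */
static DECLARE_WAIT_QUEUE_HEAD(my_dev_waitq);

static bool my_dev_readable(void)
{
	return true;		/* stand-in: pretend data is always ready */
}

/* Poll handlers now return __poll_t, a sparse-checked event bitmask. */
static __poll_t my_dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	/* Register the waiter; poll_wait() itself never blocks. */
	poll_wait(file, &my_dev_waitq, wait);

	if (my_dev_readable())
		mask |= POLLIN | POLLRDNORM;

	return mask;
}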
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 7f2b02cc8ea1..2bcffec8dbf0 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -427,6 +427,142 @@ out:
return 0;
}
+struct lpss_device_links {
+ const char *supplier_hid;
+ const char *supplier_uid;
+ const char *consumer_hid;
+ const char *consumer_uid;
+ u32 flags;
+};
+
+/*
+ * The _DEP method is used to identify dependencies but instead of creating
+ * device links for every handle in _DEP, only links in the following list are
+ * created. That is necessary because, in the general case, _DEP can refer to
+ * devices that might not have drivers, or that are on different buses, or where
+ * the supplier is not enumerated until after the consumer is probed.
+ */
+static const struct lpss_device_links lpss_device_links[] = {
+ {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+};
+
+static bool hid_uid_match(const char *hid1, const char *uid1,
+ const char *hid2, const char *uid2)
+{
+ return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static bool acpi_lpss_is_supplier(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->supplier_hid, link->supplier_uid);
+}
+
+static bool acpi_lpss_is_consumer(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->consumer_hid, link->consumer_uid);
+}
+
+struct hid_uid {
+ const char *hid;
+ const char *uid;
+};
+
+static int match_hid_uid(struct device *dev, void *data)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct hid_uid *id = data;
+
+ if (!adev)
+ return 0;
+
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ id->hid, id->uid);
+}
+
+static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+{
+ struct hid_uid data = {
+ .hid = hid,
+ .uid = uid,
+ };
+
+ return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+}
+
+static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_status status;
+ int i;
+
+ if (!acpi_has_method(adev->handle, "_DEP"))
+ return false;
+
+ status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
+ &dep_devices);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == handle)
+ return true;
+ }
+
+ return false;
+}
+
+static void acpi_lpss_link_consumer(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ device_link_add(dev2, dev1, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_link_supplier(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ device_link_add(dev1, dev2, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_create_device_links(struct acpi_device *adev,
+ struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
+ const struct lpss_device_links *link = &lpss_device_links[i];
+
+ if (acpi_lpss_is_supplier(adev, link))
+ acpi_lpss_link_consumer(&pdev->dev, link);
+
+ if (acpi_lpss_is_consumer(adev, link))
+ acpi_lpss_link_supplier(&pdev->dev, link);
+ }
+}
+
static int acpi_lpss_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
@@ -465,6 +601,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
+ /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
+ adev->pnp.type.platform_id = 0;
/* Skip the device, but continue the namespace scan. */
ret = 0;
goto err_out;
@@ -500,6 +638,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
+ acpi_lpss_create_device_links(adev, pdev);
return 1;
}
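device_link_add(consumer, supplier, DL_FLAG_PM_RUNTIME) makes runtime-PM activity on the consumer automatically resume the supplier, which is what the whitelist above controls. Extending the table might look like the sketch below; the second entry's IDs are placeholders, and a link is still only created if the consumer's _DEP actually lists the supplier:

/* Hypothetical extra entry: supplier HID/UID first, consumer HID/UID second */
static const struct lpss_device_links lpss_device_links[] = {
	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
	{"HIDXXXXX", "1", "HIDYYYYY", "2", DL_FLAG_PM_RUNTIME},	/* placeholder */
};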
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0972ec0e2eb8..f53ccc680238 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
goto leave;
}
+ /*
+ * We're seeing a lot of bogus backlight interfaces on newer machines
+ * without an LCD, such as desktops, servers and HDMI sticks. Checking
+ * the lcd flag fixes this, so enable it on any machine that is
+ * win8 ready (where we also prefer the native backlight driver, so
+ * normally the acpi_video code should not register there anyway).
+ */
+ if (only_lcd == -1)
+ only_lcd = acpi_osi_is_win8();
+
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
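Switching only_lcd from bool to int turns it into a tri-state knob: -1 means "decide automatically at registration time", while an explicit 0 or 1 from the command line wins. A minimal sketch of the pattern; resolve_only_lcd() is a hypothetical helper for illustration:

static int only_lcd = -1;	/* -1 = auto, 0 = off, 1 = on */

static void resolve_only_lcd(void)
{
	if (only_lcd == -1)			/* no explicit user choice */
		only_lcd = acpi_osi_is_win8();	/* auto-enable on win8-ready firmware */
}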
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 7a1a68b5ac5c..2243c8164b34 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -80,6 +80,9 @@
prefix, ACPICA_COPYRIGHT, \
prefix
+#define ACPI_COMMON_BUILD_TIME \
+ "Build date/time: %s %s\n", __DATE__, __TIME__
+
/* Macros for usage messages */
#define ACPI_USAGE_HEADER(usage) \
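Because ACPI_COMMON_BUILD_TIME expands to a format string plus its arguments, it can be dropped straight into a printf-style call, e.g. (illustrative output):

acpi_os_printf(ACPI_COMMON_BUILD_TIME);	/* "Build date/time: Jan  1 2018 12:00:00" */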
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 71743e5252f5..54b8d9df9423 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -223,6 +223,10 @@ void
acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types);
+
+void
acpi_db_create_execution_threads(char *num_threads_arg,
char *num_loops_arg, char *method_name_arg);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 95eed442703f..45ef3f5dc9ad 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -46,7 +46,7 @@
/*****************************************************************************
*
- * Globals related to the ACPI tables
+ * Globals related to the incoming ACPI tables
*
****************************************************************************/
@@ -87,7 +87,7 @@ ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
/*****************************************************************************
*
- * Mutual exclusion within ACPICA subsystem
+ * Mutual exclusion within the ACPICA subsystem
*
****************************************************************************/
@@ -167,7 +167,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
-/* Misc */
+/* Miscellaneous */
ACPI_GLOBAL(u32, acpi_gbl_original_mode);
ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
@@ -191,10 +191,9 @@ extern const char acpi_gbl_lower_hex_digits[];
extern const char acpi_gbl_upper_hex_digits[];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
/* Lists for tracking memory allocations (debug only) */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
@@ -203,7 +202,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
/*****************************************************************************
*
- * Namespace globals
+ * ACPI Namespace
*
****************************************************************************/
@@ -234,15 +233,20 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
/*****************************************************************************
*
- * Interpreter globals
+ * Interpreter/Parser globals
*
****************************************************************************/
-ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
+ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
+
+/* ASL/ASL+ converter */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_capture_comments, FALSE);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
/*****************************************************************************
*
@@ -252,7 +256,6 @@ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
@@ -263,7 +266,6 @@ ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
****************************************************************************/
#if (!ACPI_REDUCED_HARDWARE)
-
ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
ACPI_GLOBAL(struct acpi_gpe_block_info *,
@@ -272,10 +274,8 @@ ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
ACPI_GLOBAL(struct acpi_fixed_event_handler,
acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
-
extern struct acpi_fixed_event_info
acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-
#endif /* !ACPI_REDUCED_HARDWARE */
/*****************************************************************************
@@ -291,14 +291,14 @@ ACPI_GLOBAL(u32, acpi_gpe_count);
ACPI_GLOBAL(u32, acpi_sci_count);
ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
-/* Support for dynamic control method tracing mechanism */
+/* Dynamic control method tracing mechanism */
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
/*****************************************************************************
*
- * Debugger and Disassembler globals
+ * Debugger and Disassembler
*
****************************************************************************/
@@ -326,7 +326,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#endif
#ifdef ACPI_DEBUGGER
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
@@ -340,7 +339,6 @@ ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop);
ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated);
-
ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
@@ -350,32 +348,33 @@ ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
-/*
- * Statistic globals
- */
+/* Statistics globals */
+
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
ACPI_GLOBAL(u32, acpi_gbl_num_objects);
-
#endif /* ACPI_DEBUGGER */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
-
ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
-
#endif
-/*
- * Meant for the -ca option.
- */
+/*****************************************************************************
+ *
+ * ACPICA application-specific globals
+ *
+ ****************************************************************************/
+
+/* ASL-to-ASL+ conversion utility (implemented within the iASL compiler) */
+
+#ifdef ACPI_ASL_COMPILER
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
@@ -386,23 +385,18 @@ ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
-ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
@@ -410,30 +404,18 @@ ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
*acpi_gbl_comment_addr_list_head, NULL);
-
-ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
-ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
-
ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
-
-/*****************************************************************************
- *
- * Application globals
- *
- ****************************************************************************/
+#endif
#ifdef ACPI_APPLICATION
-
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
@@ -442,16 +424,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
-
#endif /* ACPI_APPLICATION */
-/*****************************************************************************
- *
- * Info/help support
- *
- ****************************************************************************/
-
-extern const struct ah_predefined_name asl_predefined_info[];
-extern const struct ah_device_id asl_device_ids[];
-
#endif /* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0d45b8bb1678..a56675f0661e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -622,7 +622,7 @@ struct acpi_control_state {
union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
- u32 loop_count; /* While() loop counter */
+ u64 loop_timeout; /* While() loop timeout */
};
/*
@@ -1218,16 +1218,17 @@ struct acpi_db_method_info {
acpi_object_type *types;
/*
- * Arguments to be passed to method for the command
- * Threads -
- * the Number of threads, ID of current thread and
- * Index of current thread inside all them created.
+ * Arguments to be passed to method for the commands Threads and
+ * Background. Note: ACPI specifies a maximum of 7 arguments (0 - 6).
+ *
+ * For the Threads command: the number of threads, the ID of the
+ * current thread, and the index of the current thread among all
+ * created threads.
*/
char init_args;
#ifdef ACPI_DEBUGGER
- acpi_object_type arg_types[4];
+ acpi_object_type arg_types[ACPI_METHOD_NUM_ARGS];
#endif
- char *arguments[4];
+ char *arguments[ACPI_METHOD_NUM_ARGS];
char num_threads_str[11];
char id_of_thread_str[11];
char index_of_thread_str[11];
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c7f0c96cc00f..128a3d71b598 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -455,7 +455,7 @@
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
-#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_NAMESPACE(s, p, e) acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 54a0c51b3e37..2fb1bb78d85c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -289,6 +289,9 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
u8 no_trailing);
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path);
+
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 83b75e9db7ef..b6b29d717824 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -118,9 +118,6 @@ extern const char *acpi_gbl_ptyp_decode[];
#ifndef ACPI_MSG_ERROR
#define ACPI_MSG_ERROR "ACPI Error: "
#endif
-#ifndef ACPI_MSG_EXCEPTION
-#define ACPI_MSG_EXCEPTION "ACPI Exception: "
-#endif
#ifndef ACPI_MSG_WARNING
#define ACPI_MSG_WARNING "ACPI Warning: "
#endif
@@ -129,10 +126,10 @@ extern const char *acpi_gbl_ptyp_decode[];
#endif
#ifndef ACPI_MSG_BIOS_ERROR
-#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_ERROR "Firmware Error (ACPI): "
#endif
#ifndef ACPI_MSG_BIOS_WARNING
-#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Warning (bug): "
+#define ACPI_MSG_BIOS_WARNING "Firmware Warning (ACPI): "
#endif
/*
@@ -233,10 +230,10 @@ u64 acpi_ut_implicit_strtoul64(char *string);
*/
acpi_status acpi_ut_init_globals(void);
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
const char *acpi_ut_get_mutex_name(u32 mutex_id);
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
#endif
@@ -641,9 +638,11 @@ void ut_convert_backslashes(char *pathname);
void acpi_ut_repair_name(char *name);
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size);
+
u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
u8
@@ -737,9 +736,11 @@ acpi_ut_predefined_bios_error(const char *module_name,
u8 node_flags, const char *format, ...);
void
-acpi_ut_namespace_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status);
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_name,
+ acpi_status lookup_status);
void
acpi_ut_method_error(const char *module_name,
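acpi_ut_safe_strncpy() is introduced as a bounded copy that, unlike plain strncpy(), is guaranteed to NUL-terminate the destination; that is why dbfileio.c and psutils.c switch to it below. A sketch of the assumed semantics (the real ACPICA implementation may differ in detail):

void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
{
	/* Copy and truncate as needed, then force termination */
	strncpy(dest, source, dest_size);
	dest[dest_size - 1] = 0;
}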
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 3b30319752f0..ed088fceb18d 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -67,6 +67,8 @@ static acpi_status
acpi_db_execution_walk(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context);
+
/*******************************************************************************
*
* FUNCTION: acpi_db_delete_objects
@@ -229,7 +231,7 @@ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info)
ACPI_FUNCTION_NAME(db_execute_setup);
- /* Catenate the current scope to the supplied name */
+ /* Concatenate the current scope to the supplied name */
info->pathname[0] = 0;
if ((info->name[0] != '\\') && (info->name[0] != '/')) {
@@ -611,6 +613,112 @@ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_single_execution_thread
+ *
+ * PARAMETERS: context - Method info struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create one thread and execute a method
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context)
+{
+ struct acpi_db_method_info *info = context;
+ acpi_status status;
+ struct acpi_buffer return_obj;
+
+ acpi_os_printf("\n");
+
+ status = acpi_db_execute_method(info, &return_obj);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s During evaluation of %s\n",
+ acpi_format_exception(status), info->pathname);
+ return;
+ }
+
+ /* Display a return object, if any */
+
+ if (return_obj.length) {
+ acpi_os_printf("Evaluation of %s returned object %p, "
+ "external buffer length %X\n",
+ acpi_gbl_db_method_info.pathname,
+ return_obj.pointer, (u32)return_obj.length);
+
+ acpi_db_dump_external_object(return_obj.pointer, 1);
+ }
+
+ acpi_os_printf("\nBackground thread completed\n%c ",
+ ACPI_DEBUGGER_COMMAND_PROMPT);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_create_execution_thread
+ *
+ * PARAMETERS: method_name_arg - Control method to execute
+ * arguments - Array of arguments to the method
+ * types - Corresponding array of object types
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles
+ * arguments passed on command line for control methods.
+ *
+ ******************************************************************************/
+
+void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types)
+{
+ acpi_status status;
+ u32 i;
+
+ memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info));
+ acpi_gbl_db_method_info.name = method_name_arg;
+ acpi_gbl_db_method_info.init_args = 1;
+ acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments;
+ acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types;
+
+ /* Setup method arguments, up to 7 (0-6) */
+
+ for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) {
+ acpi_gbl_db_method_info.arguments[i] = *arguments;
+ arguments++;
+
+ acpi_gbl_db_method_info.arg_types[i] = *types;
+ types++;
+ }
+
+ status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ /* Get the NS node; this also verifies that the object exists */
+
+ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname,
+ &acpi_gbl_db_method_info.method);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s Could not get handle for %s\n",
+ acpi_format_exception(status),
+ acpi_gbl_db_method_info.pathname);
+ return;
+ }
+
+ status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD,
+ acpi_db_single_execution_thread,
+ &acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ acpi_os_printf("\nBackground thread started\n");
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_create_execution_threads
*
* PARAMETERS: num_threads_arg - Number of threads to create
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index 4d81ea291d93..cf9607945704 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -99,8 +99,8 @@ void acpi_db_open_debug_file(char *name)
}
acpi_os_printf("Debug output file %s opened\n", name);
- strncpy(acpi_gbl_db_debug_filename, name,
- sizeof(acpi_gbl_db_debug_filename));
+ acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name,
+ sizeof(acpi_gbl_db_debug_filename));
acpi_gbl_db_output_to_file = TRUE;
}
#endif
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2626d79db064..954ca3b981a7 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -136,6 +136,7 @@ enum acpi_ex_debugger_commands {
CMD_UNLOAD,
CMD_TERMINATE,
+ CMD_BACKGROUND,
CMD_THREADS,
CMD_TEST,
@@ -212,6 +213,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"UNLOAD", 1},
{"TERMINATE", 0},
+ {"BACKGROUND", 1},
{"THREADS", 3},
{"TEST", 1},
@@ -222,9 +224,56 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
/*
* Help for all debugger commands. First argument is the number of lines
* of help to output for the command.
+ *
+ * Note: Some commands are not supported by the kernel-level version of
+ * the debugger.
*/
static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
- {0, "\nGeneral-Purpose Commands:", "\n"},
+ {0, "\nNamespace Access:", "\n"},
+ {1, " Businfo", "Display system bus info\n"},
+ {1, " Disassemble <Method>", "Disassemble a control method\n"},
+ {1, " Find <AcpiName> (? is wildcard)",
+ "Find ACPI name(s) with wildcards\n"},
+ {1, " Integrity", "Validate namespace integrity\n"},
+ {1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Namespace [Object] [Depth]",
+ "Display loaded namespace tree/subtree\n"},
+ {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
+ {1, " Objects [ObjectType]",
+ "Display summary of all objects or just given type\n"},
+ {1, " Owner <OwnerId> [Depth]",
+ "Display loaded namespace by object owner\n"},
+ {1, " Paths", "Display full pathnames of namespace objects\n"},
+ {1, " Predefined", "Check all predefined names\n"},
+ {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
+ {1, " References <Addr>", "Find all references to object at addr\n"},
+ {1, " Resources [DeviceName]",
+ "Display Device resources (no arg = all devices)\n"},
+ {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
+ {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
+ {1, " Type <Object>", "Display object type\n"},
+
+ {0, "\nControl Method Execution:", "\n"},
+ {1, " Evaluate <Namepath> [Arguments]",
+ "Evaluate object or control method\n"},
+ {1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Background <Namepath> [Arguments]",
+ "Evaluate object/method in a separate thread\n"},
+ {1, " Thread <Threads><Loops><NamePath>",
+ "Spawn threads to execute method(s)\n"},
+#endif
+ {1, " Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
+ {7, " [Arguments] formats:", "Control method argument formats\n"},
+ {1, " Hex Integer", "Integer\n"},
+ {1, " \"Ascii String\"", "String\n"},
+ {1, " (Hex Byte List)", "Buffer\n"},
+ {1, " (01 42 7A BF)", "Buffer example (4 bytes)\n"},
+ {1, " [Package Element List]", "Package\n"},
+ {1, " [0x01 0x1234 \"string\"]",
+ "Package example (3 elements)\n"},
+
+ {0, "\nMiscellaneous:", "\n"},
{1, " Allocations", "Display list of current memory allocations\n"},
{2, " Dump <Address>|<Namepath>", "\n"},
{0, " [Byte|Word|Dword|Qword]",
@@ -248,46 +297,30 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Stack", "Display CPU stack usage\n"},
{1, " Tables", "Info about current ACPI table(s)\n"},
{1, " Tables", "Display info about loaded ACPI tables\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Terminate", "Delete namespace and all internal objects\n"},
+#endif
{1, " ! <CommandNumber>", "Execute command from history buffer\n"},
{1, " !!", "Execute last command again\n"},
- {0, "\nNamespace Access Commands:", "\n"},
- {1, " Businfo", "Display system bus info\n"},
- {1, " Disassemble <Method>", "Disassemble a control method\n"},
- {1, " Find <AcpiName> (? is wildcard)",
- "Find ACPI name(s) with wildcards\n"},
- {1, " Integrity", "Validate namespace integrity\n"},
- {1, " Methods", "Display list of loaded control methods\n"},
- {1, " Namespace [Object] [Depth]",
- "Display loaded namespace tree/subtree\n"},
- {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
- {1, " Objects [ObjectType]",
- "Display summary of all objects or just given type\n"},
- {1, " Owner <OwnerId> [Depth]",
- "Display loaded namespace by object owner\n"},
- {1, " Paths", "Display full pathnames of namespace objects\n"},
- {1, " Predefined", "Check all predefined names\n"},
- {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
- {1, " References <Addr>", "Find all references to object at addr\n"},
- {1, " Resources [DeviceName]",
- "Display Device resources (no arg = all devices)\n"},
- {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
- {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
- {1, " Type <Object>", "Display object type\n"},
+ {0, "\nMethod and Namespace Debugging:", "\n"},
+ {5, " Trace <State> [<Namepath>] [Once]",
+ "Trace control method execution\n"},
+ {1, " Enable", "Enable all messages\n"},
+ {1, " Disable", "Disable tracing\n"},
+ {1, " Method", "Enable method execution messages\n"},
+ {1, " Opcode", "Enable opcode execution messages\n"},
+ {3, " Test <TestName>", "Invoke a debug test\n"},
+ {1, " Objects", "Read/write/compare all namespace data objects\n"},
+ {1, " Predefined",
+ "Validate all ACPI predefined names (_STA, etc.)\n"},
+ {1, " Execute predefined",
+ "Execute all predefined (public) methods\n"},
- {0, "\nControl Method Execution Commands:", "\n"},
+ {0, "\nControl Method Single-Step Execution:", "\n"},
{1, " Arguments (or Args)", "Display method arguments\n"},
{1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
{1, " Call", "Run to next control method invocation\n"},
- {1, " Debug <Namepath> [Arguments]", "Single Step a control method\n"},
- {6, " Evaluate", "Synonym for Execute\n"},
- {5, " Execute <Namepath> [Arguments]", "Execute control method\n"},
- {1, " Hex Integer", "Integer method argument\n"},
- {1, " \"Ascii String\"", "String method argument\n"},
- {1, " (Hex Byte List)", "Buffer method argument\n"},
- {1, " [Package Element List]", "Package method argument\n"},
- {5, " Execute predefined",
- "Execute all predefined (public) methods\n"},
{1, " Go", "Allow method to run to completion\n"},
{1, " Information", "Display info about the current method\n"},
{1, " Into", "Step into (not over) a method call\n"},
@@ -296,41 +329,24 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Results", "Display method result stack\n"},
{1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
{1, " Stop", "Terminate control method\n"},
- {5, " Trace <State> [<Namepath>] [Once]",
- "Trace control method execution\n"},
- {1, " Enable", "Enable all messages\n"},
- {1, " Disable", "Disable tracing\n"},
- {1, " Method", "Enable method execution messages\n"},
- {1, " Opcode", "Enable opcode execution messages\n"},
{1, " Tree", "Display control method calling tree\n"},
{1, " <Enter>", "Single step next AML opcode (over calls)\n"},
#ifdef ACPI_APPLICATION
- {0, "\nHardware Simulation Commands:", "\n"},
- {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
- {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
- {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
- {1, " Gpes", "Display info on all GPE devices\n"},
- {1, " Sci", "Generate an SCI\n"},
- {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
-
- {0, "\nFile I/O Commands:", "\n"},
+ {0, "\nFile Operations:", "\n"},
{1, " Close", "Close debug output file\n"},
{1, " Load <Input Filename>", "Load ACPI table from a file\n"},
{1, " Open <Output Filename>", "Open a file for debug output\n"},
{1, " Unload <Namepath>",
"Unload an ACPI table via namespace object\n"},
- {0, "\nUser Space Commands:", "\n"},
- {1, " Terminate", "Delete namespace and all internal objects\n"},
- {1, " Thread <Threads><Loops><NamePath>",
- "Spawn threads to execute method(s)\n"},
-
- {0, "\nDebug Test Commands:", "\n"},
- {3, " Test <TestName>", "Invoke a debug test\n"},
- {1, " Objects", "Read/write/compare all namespace data objects\n"},
- {1, " Predefined",
- "Execute all ACPI predefined names (_STA, etc.)\n"},
+ {0, "\nHardware Simulation:", "\n"},
+ {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
+ {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
+ {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
+ {1, " Gpes", "Display info on all GPE devices\n"},
+ {1, " Sci", "Generate an SCI\n"},
+ {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
#endif
{0, NULL, NULL}
};
@@ -442,11 +458,15 @@ static void acpi_db_display_help(char *command)
/* No argument to help, display help for all commands */
+ acpi_os_printf("\nSummary of AML Debugger Commands\n\n");
+
while (next->invocation) {
acpi_os_printf("%-38s%s", next->invocation,
next->description);
next++;
}
+ acpi_os_printf("\n");
+
} else {
/* Display help for all commands that match the substring */
@@ -1087,6 +1107,13 @@ acpi_db_command_dispatch(char *input_buffer,
/* acpi_initialize (NULL); */
break;
+ case CMD_BACKGROUND:
+
+ acpi_db_create_execution_thread(acpi_gbl_db_args[1],
+ &acpi_gbl_db_args[2],
+ &acpi_gbl_db_arg_types[2]);
+ break;
+
case CMD_THREADS:
acpi_db_create_execution_threads(acpi_gbl_db_args[1],
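With the dispatch case wired up, Background evaluates an object in its own thread while the debugger prompt ('-') stays responsive. A hypothetical session; the method path and output are illustrative:

- Background \_SB.PCI0.GFX0._DOD
Background thread started
- <other commands remain available while the method runs>
Background thread completed
-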
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index f470e81b0499..4b6ebc2a2851 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -118,6 +118,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
control_state->control.package_end =
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
+ control_state->control.loop_timeout = acpi_os_get_timer() +
+ (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
@@ -206,15 +208,15 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
/* Predicate was true, the body of the loop was just executed */
/*
- * This loop counter mechanism allows the interpreter to escape
- * possibly infinite loops. This can occur in poorly written AML
- * when the hardware does not respond within a while loop and the
- * loop does not implement a timeout.
+ * This infinite loop detection mechanism allows the interpreter
+ * to escape possibly infinite loops. This can occur in poorly
+ * written AML when the hardware does not respond within a while
+ * loop and the loop does not implement a timeout.
*/
- control_state->control.loop_count++;
- if (control_state->control.loop_count >
- acpi_gbl_max_loop_iterations) {
- status = AE_AML_INFINITE_LOOP;
+ if (ACPI_TIME_AFTER(acpi_os_get_timer(),
+ control_state->control.
+ loop_timeout)) {
+ status = AE_AML_LOOP_TIMEOUT;
break;
}
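The While() guard changes from counting iterations to a wall-clock deadline in the 100 ns units of acpi_os_get_timer(); as the multiplication by ACPI_100NSEC_PER_SEC shows, acpi_gbl_max_loop_iterations is now interpreted as a timeout in seconds. A sketch of the arithmetic, with a 30 second budget chosen for illustration:

/* Compute a deadline in 100 ns ticks at loop entry... */
u64 loop_timeout = acpi_os_get_timer() +
		   (u64)(30 * ACPI_100NSEC_PER_SEC);

/* ...and test it after each completed iteration of the loop body */
if (ACPI_TIME_AFTER(acpi_os_get_timer(), loop_timeout))
	status = AE_AML_LOOP_TIMEOUT;	/* escape a runaway While() */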
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7bcf5f5ea029..0cab34a593d5 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -209,7 +209,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
}
@@ -383,7 +384,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
walk_state,
&info->connection_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(child->common.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ child->common.
value.name,
status);
return_ACPI_STATUS(status);
@@ -402,7 +405,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
ACPI_NS_DONT_OPEN_SCOPE,
walk_state, &info->field_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
return_ACPI_STATUS(status);
} else {
@@ -498,7 +502,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -618,7 +623,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
@@ -681,7 +687,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -695,7 +702,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -765,7 +773,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -778,7 +787,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.data_register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 82448551781b..b21fe084ffc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -112,7 +112,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
acpi_namespace_node,
&(op->common.node)));
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(op->common.value.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ op->common.value.
string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 6d487edfe2de..5a602b75084e 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -297,8 +297,10 @@ acpi_ds_init_package_element(u8 object_type,
{
union acpi_operand_object **element_ptr;
+ ACPI_FUNCTION_TRACE(ds_init_package_element);
+
if (!source_object) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -329,7 +331,7 @@ acpi_ds_init_package_element(u8 object_type,
source_object->package.flags |= AOPOBJ_DATA_VALID;
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -352,6 +354,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
union acpi_generic_state scope_info;
union acpi_operand_object *element = *element_ptr;
struct acpi_namespace_node *resolved_node;
+ struct acpi_namespace_node *original_node;
char *external_path = NULL;
acpi_object_type type;
@@ -441,6 +444,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
* will remain as named references. This behavior is not described
* in the ACPI spec, but it appears to be an oversight.
*/
+ original_node = resolved_node;
status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
if (ACPI_FAILURE(status)) {
return_VOID;
@@ -468,26 +472,27 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
*/
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
-
- /* TBD: This may not be necesssary */
-
- acpi_ut_add_reference(resolved_node->object);
+ case ACPI_TYPE_METHOD:
break;
case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
+ /* acpi_ex_resolve_node_to_value gave these an extra reference */
+
+ acpi_ut_remove_reference(original_node->object);
break;
default:
/*
* For all other types - the node was resolved to an actual
- * operand object with a value, return the object
+ * operand object with a value, return the object. Remove
+ * a reference on the existing object.
*/
+ acpi_ut_remove_reference(element);
*element_ptr = (union acpi_operand_object *)resolved_node;
break;
}
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 0dabd9b95684..4c5faf629a83 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -583,7 +583,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(name_string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ name_string, status);
}
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index eaa859a89702..5771e4e4a99a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -207,7 +207,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path,
+ status);
return_ACPI_STATUS(status);
}
@@ -375,7 +376,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ path, status);
return_ACPI_STATUS(status);
}
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index aad83ef5a4ec..b3d0aaec8203 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -184,11 +184,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
- ACPI_ERROR_NAMESPACE(buffer_ptr,
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ buffer_ptr,
status);
}
#else
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
@@ -343,7 +346,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
return_ACPI_STATUS(status);
}
@@ -719,7 +723,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
op->common.node = new_node;
} else {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
}
break;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 28b447ff92df..bb58419f0d61 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -298,6 +298,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
space_id)));
+
+ /*
+ * Special case for an EC timeout. These are seen so frequently
+ * that an additional error message is helpful
+ */
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_EC) &&
+ (status == AE_TIME)) {
+ ACPI_ERROR((AE_INFO,
+ "Timeout from EC hardware or EC device driver"));
+ }
}
if (!(handler_desc->address_space.handler_flags &
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 83398dc4b7c2..b2ff61bdb9a8 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -617,10 +617,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
- ACPI_FUNCTION_NAME(ex_dump_operand)
+ ACPI_FUNCTION_NAME(ex_dump_operand);
- /* Check if debug output enabled */
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
return;
}
@@ -904,7 +905,7 @@ void
acpi_ex_dump_operands(union acpi_operand_object **operands,
const char *opcode_name, u32 num_operands)
{
- ACPI_FUNCTION_NAME(ex_dump_operands);
+ ACPI_FUNCTION_TRACE(ex_dump_operands);
if (!opcode_name) {
opcode_name = "UNKNOWN";
@@ -928,7 +929,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"**** End operand dump for [%s]\n", opcode_name));
- return;
+ return_VOID;
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index a2f4e25d45b1..5b4282902a83 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -150,10 +150,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
*
******************************************************************************/
acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)
{
acpi_status status;
- u32 delta_ticks;
+ u64 delta_ticks;
u64 quotient;
ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
@@ -168,30 +168,29 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(AE_SUPPORT);
}
+ if (start_ticks == end_ticks) {
+ *time_elapsed = 0;
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Compute Tick Delta:
* Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
*/
- if (start_ticks < end_ticks) {
- delta_ticks = end_ticks - start_ticks;
- } else if (start_ticks > end_ticks) {
+ delta_ticks = end_ticks;
+ if (start_ticks > end_ticks) {
if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
/* 24-bit Timer */
- delta_ticks =
- (((0x00FFFFFF - start_ticks) +
- end_ticks) & 0x00FFFFFF);
+ delta_ticks |= (u64)1 << 24;
} else {
/* 32-bit Timer */
- delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+ delta_ticks |= (u64)1 << 32;
}
- } else { /* start_ticks == end_ticks */
-
- *time_elapsed = 0;
- return_ACPI_STATUS(AE_OK);
}
+ delta_ticks -= start_ticks;
/*
* Compute Duration (Requires a 64-bit multiply and divide):
@@ -199,10 +198,10 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
* time_elapsed (microseconds) =
* (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
*/
- status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+ status = acpi_ut_short_divide(delta_ticks * ACPI_USEC_PER_SEC,
ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
- *time_elapsed = (u32) quotient;
+ *time_elapsed = (u32)quotient;
return_ACPI_STATUS(status);
}
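Widening delta_ticks to u64 lets a single borrow bit express the rollover. A worked example for a 24-bit PM timer with one wrap (values illustrative):

/* start_ticks = 0x00FFFF00, end_ticks = 0x00000100, 24-bit timer:
 *   delta = (end | 1 << 24) - start
 *         = 0x01000100 - 0x00FFFF00
 *         = 0x200 ticks  (~143 us at the 3.579545 MHz PM timer rate)
 */
u64 delta_ticks = (((u64)1 << 24) | 0x00000100) - 0x00FFFF00;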
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 3094cec4eab4..d1679035d5f3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -128,14 +128,14 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
acpi_io_address last_address;
const struct acpi_port_info *port_info;
- ACPI_FUNCTION_NAME(hw_validate_io_request);
+ ACPI_FUNCTION_TRACE(hw_validate_io_request);
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
ACPI_ERROR((AE_INFO,
"Bad BitWidth parameter: %8.8X", bit_width));
- return (AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
port_info = acpi_protected_ports;
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
ACPI_ERROR((AE_INFO,
"Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
ACPI_FORMAT_UINT64(address), byte_width));
- return (AE_LIMIT);
+ return_ACPI_STATUS(AE_LIMIT);
}
/* Exit if requested address is not within the protected port table */
if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Check request against the list of protected I/O ports */
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
}
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f2733f51ca8d..33e652a12fca 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -644,17 +644,18 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag &&
- (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags |= IMPLICIT_EXTERNAL;
- }
-#endif
}
/* Special handling for the last segment (num_segments == 0) */
else {
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag
+ && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags &= ~IMPLICIT_EXTERNAL;
+ }
+#endif
+
/*
* Sanity typecheck of the target object:
*
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 539d775bbc92..d55dcc82f434 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -495,7 +495,8 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
/* Check if we are resolving a named reference within a package */
- ACPI_ERROR_NAMESPACE(original_object->string.pointer, status);
+ ACPI_ERROR_NAMESPACE(&scope_info,
+ original_object->string.pointer, status);
goto error_exit;
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index a410760a0308..22c92d1a24d8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -49,6 +49,9 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
+/* Local Prototypes */
+static void acpi_ns_normalize_pathname(char *original_path);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -63,6 +66,7 @@ ACPI_MODULE_NAME("nsnames")
* for error and debug statements.
*
******************************************************************************/
+
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
char *name_buffer;
@@ -352,3 +356,148 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
return_PTR(name_buffer);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_prefixed_pathname
+ *
+ * PARAMETERS: prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Construct a fully qualified pathname from a concatenation of:
+ * 1) Path associated with the prefix_scope namespace node
+ * 2) External path representation of the Internal path
+ *
+ ******************************************************************************/
+
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path)
+{
+ acpi_status status;
+ char *full_path = NULL;
+ char *external_path = NULL;
+ char *prefix_path = NULL;
+ u32 prefix_path_length = 0;
+
+ /* If there is a prefix, get the pathname to it */
+
+ if (prefix_scope && prefix_scope->scope.node) {
+ prefix_path =
+ acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
+ TRUE);
+ if (prefix_path) {
+ prefix_path_length = strlen(prefix_path);
+ }
+ }
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path,
+ NULL, &external_path);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Merge the prefix path and the path. 2 is for one dot and trailing null */
+
+ full_path =
+ ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) +
+ 2);
+ if (!full_path) {
+ goto cleanup;
+ }
+
+ /* Don't merge if the External path is already fully qualified */
+
+ if (prefix_path && (*external_path != '\\') && (*external_path != '^')) {
+ strcat(full_path, prefix_path);
+ if (prefix_path[1]) {
+ strcat(full_path, ".");
+ }
+ }
+
+ acpi_ns_normalize_pathname(external_path);
+ strcat(full_path, external_path);
+
+cleanup:
+ if (prefix_path) {
+ ACPI_FREE(prefix_path);
+ }
+ if (external_path) {
+ ACPI_FREE(external_path);
+ }
+
+ return (full_path);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_normalize_pathname
+ *
+ * PARAMETERS: original_path - Path to be normalized, in External format
+ *
+ * RETURN: The original path is processed in-place
+ *
+ * DESCRIPTION: Remove trailing underscores from each element of a path.
+ *
+ * For example: \A___.B___.C___ becomes \A.B.C
+ *
+ ******************************************************************************/
+
+static void acpi_ns_normalize_pathname(char *original_path)
+{
+ char *input_path = original_path;
+ char *new_path_buffer;
+ char *new_path;
+ u32 i;
+
+ /* Allocate a temp buffer in which to construct the new path */
+
+ new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1);
+ new_path = new_path_buffer;
+ if (!new_path_buffer) {
+ return;
+ }
+
+ /* Special characters may appear at the beginning of the path */
+
+ if (*input_path == '\\') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ while (*input_path == '^') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ /* Remainder of the path */
+
+ while (*input_path) {
+
+ /* Do one nameseg at a time */
+
+ for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
+ *new_path = *input_path;
+ new_path++;
+ }
+
+ input_path++;
+ }
+
+ /* Dot means that there are more namesegs to come */
+
+ if (*input_path == '.') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+ }
+
+ *new_path = 0;
+ strcpy(original_path, new_path_buffer);
+ ACPI_FREE(new_path_buffer);
+}
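Illustrative before/after pairs for the normalization step: trailing pad underscores are dropped from each four-character nameseg, while '\' and '^' prefix characters and a leading underscore within a nameseg are preserved:

/*   "\_SB_.PCI0.SBRG"   ->   "\_SB.PCI0.SBRG"
 *   "^^_SB_.I2C7"       ->   "^^_SB.I2C7"
 *   "\A___.B___.C___"   ->   "\A.B.C"   (the example from the header above)
 */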
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5de8957f5ef0..e91dbee9235f 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -417,6 +417,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
+ new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 783f4c838aee..9b51f65823b2 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,10 +61,10 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
*
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
- * external_params - List of parameters to pass to method,
+ * external_params - List of parameters to pass to a method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
- * return_buffer - Where to put method's return value (if
+ * return_buffer - Where to put the object's return value (if
* any). If NULL, no value is returned.
* return_type - Expected type of return object
*
@@ -100,13 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
+ /* Get a handle here, in order to build an error message if needed */
+
+ target_handle = handle;
if (pathname) {
status = acpi_get_handle(handle, pathname, &target_handle);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- } else {
- target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index eb9dfaca555f..171e2faa7c50 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -361,7 +361,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
/* Final exception check (may have been changed from code above) */
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path, status);
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 0bef6df71bba..c0b179883ff2 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -372,16 +372,10 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
* external declaration opcode. Setting walk_state->Aml to
* walk_state->parser_state.Aml + 2 increments the
* walk_state->Aml past the object type and the paramcount of the
- * external opcode. For the error message, only print the AML
- * offset. We could attempt to print the name but this may cause
- * a segmentation fault when printing the namepath because the
- * AML may be incorrect.
+ * external opcode.
*/
- acpi_os_printf
- ("// Invalid external declaration at AML offset 0x%x.\n",
- walk_state->aml -
- walk_state->parser_state.aml_start);
walk_state->aml = walk_state->parser_state.aml + 2;
+ walk_state->parser_state.aml = walk_state->aml;
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
#endif
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 02642760cb93..cd59dfe6a47d 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -94,9 +94,11 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
- ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
- (acpi_ps_get_opcode_info(opcode))->
- name, sizeof(op->common.aml_op_name)));
+ ACPI_DISASM_ONLY_MEMBERS(acpi_ut_safe_strncpy(op->common.aml_op_name,
+ (acpi_ps_get_opcode_info
+ (opcode))->name,
+ sizeof(op->common.
+ aml_op_name)));
}
/*******************************************************************************
@@ -158,10 +160,10 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
if (opcode == AML_SCOPE_OP) {
acpi_gbl_current_scope = op;
}
- }
- if (gbl_capture_comments) {
- ASL_CV_TRANSFER_COMMENTS(op);
+ if (acpi_gbl_capture_comments) {
+ ASL_CV_TRANSFER_COMMENTS(op);
+ }
}
return (op);
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 615a885e2ca3..cff7154b7fee 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -163,6 +163,9 @@ acpi_debug_print(u32 requested_debug_level,
{
acpi_thread_id thread_id;
va_list args;
+#ifdef ACPI_APPLICATION
+ int fill_count;
+#endif
/* Check if debug output enabled */
@@ -202,10 +205,21 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("[%u] ", (u32)thread_id);
}
- acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
-#endif
+ fill_count = 48 - acpi_gbl_nesting_level -
+ strlen(acpi_ut_trim_function_name(function_name));
+ if (fill_count < 0) {
+ fill_count = 0;
+ }
+
+ acpi_os_printf("[%02ld] %*s",
+ acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
+ acpi_os_printf("%s%*s: ",
+ acpi_ut_trim_function_name(function_name), fill_count,
+ " ");
+#else
acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
+#endif
va_start(args, format);
acpi_os_vprintf(format, args);
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 02cd2c2d961a..55debbad487d 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -395,11 +395,6 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
return (acpi_gbl_ref_class_names[object->reference.class]);
}
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_mutex_name
@@ -433,6 +428,12 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
return (acpi_gbl_mutex_names[mutex_id]);
}
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/*
+ * Strings and procedures used for debug only
+ */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_notify_name
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index e3368186e1c1..42388dcb5ccc 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,6 +182,78 @@ acpi_ut_predefined_bios_error(const char *module_name,
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_prefixed_namespace_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with the full pathname constructed this way:
+ *
+ * prefix_scope_node_full_path.externalized_internal_path
+ *
+ * NOTE: 10/2017: Treat the major ns_lookup errors as firmware errors
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_path,
+ acpi_status lookup_status)
+{
+ char *full_path;
+ const char *message;
+
+ /*
+ * Main cases:
+ * 1) Object creation, object must not already exist
+ * 2) Object lookup, object must exist
+ */
+ switch (lookup_status) {
+ case AE_ALREADY_EXISTS:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure creating";
+ break;
+
+ case AE_NOT_FOUND:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure looking up";
+ break;
+
+ default:
+
+ acpi_os_printf(ACPI_MSG_ERROR);
+ message = "Failure looking up";
+ break;
+ }
+
+ /* Concatenate the prefix path and the internal path */
+
+ full_path =
+ acpi_ns_build_prefixed_pathname(prefix_scope, internal_path);
+
+ acpi_os_printf("%s [%s], %s", message,
+ full_path ? full_path : "Could not get pathname",
+ acpi_format_exception(lookup_status));
+
+ if (full_path) {
+ ACPI_FREE(full_path);
+ }
+
+ ACPI_MSG_SUFFIX;
+}
+
+#ifdef __OBSOLETE_FUNCTION
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_namespace_error
*
* PARAMETERS: module_name - Caller's module name (for error output)
@@ -240,6 +312,7 @@ acpi_ut_namespace_error(const char *module_name,
ACPI_MSG_SUFFIX;
ACPI_MSG_REDIRECT_END;
}
+#endif
/*******************************************************************************
*
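
Given the printf sequence in acpi_ut_prefixed_namespace_error() above, an AE_ALREADY_EXISTS failure would be reported along these lines (pathname and suffix are illustrative; the exact prefix text comes from the ACPI_MSG_BIOS_ERROR and ACPI_MSG_SUFFIX macros):

	ACPI BIOS Error (bug): Failure creating [\_SB.PCI0.XHC.RHUB.HS11], AE_ALREADY_EXISTS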
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 23e766d1691d..45eeb0dcf283 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_next_owner_id_offset = 0;
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_osi_mutex = NULL;
- acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 5f9c680076c4..2055a858e5f5 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -134,7 +134,7 @@ acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.hi = operand_ovl.part.lo;
- operand_ovl.part.lo ^= operand_ovl.part.lo;
+ operand_ovl.part.lo = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
@@ -171,7 +171,7 @@ acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.lo = operand_ovl.part.hi;
- operand_ovl.part.hi ^= operand_ovl.part.hi;
+ operand_ovl.part.hi = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 586354788018..524ba931d5e8 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -286,8 +286,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %u could not acquire Mutex [0x%X]",
- (u32)this_thread_id, mutex_id));
+ "Thread %u could not acquire Mutex [%s] (0x%X)",
+ (u32)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
}
return (status);
@@ -322,8 +323,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [0x%X] is not acquired, cannot release",
- mutex_id));
+ "Mutex [%s] (0x%X) is not acquired, cannot release",
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
return (AE_NOT_ACQUIRED);
}
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 792664982ea3..33a0970646df 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,7 +140,7 @@ int acpi_ut_stricmp(char *string1, char *string2)
return (c1 - c2);
}
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
/*******************************************************************************
*
* FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
@@ -199,4 +199,13 @@ acpi_ut_safe_strncat(char *dest,
strncat(dest, source, max_transfer_length);
return (FALSE);
}
+
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
+{
+ /* Always terminate destination string */
+
+ strncpy(dest, source, dest_size);
+ dest[dest_size - 1] = 0;
+}
+
#endif
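
A short sketch of why the new helper is preferred over bare strncpy() (hypothetical standalone harness): strncpy() does not NUL-terminate when the source fills the buffer, whereas the helper forces termination unconditionally.

	#include <stdio.h>
	#include <string.h>

	typedef size_t acpi_size;

	/* Same logic as acpi_ut_safe_strncpy() above */
	static void safe_strncpy(char *dest, const char *source, acpi_size dest_size)
	{
		strncpy(dest, source, dest_size);
		dest[dest_size - 1] = 0;	/* always terminate */
	}

	int main(void)
	{
		char buf[4];

		safe_strncpy(buf, "OVERLONG", sizeof(buf));
		printf("%s\n", buf);	/* prints "OVE", reliably terminated */
		return 0;
	}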
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 3175b133c0e4..f6b8dd24b006 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -101,6 +101,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
{"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
+ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
+ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 965fb5cec94f..97f48d71f9e6 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -52,10 +52,9 @@ static acpi_status
acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);
-static acpi_status
-acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);
/*******************************************************************************
*
@@ -357,7 +356,7 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
* FUNCTION: acpi_ut_strtoul_multiply64
*
* PARAMETERS: multiplicand - Current accumulated converted integer
- * multiplier - Base/Radix
+ * base - Base/Radix
* out_product - Where the product is returned
*
* RETURN: Status and 64-bit product
@@ -369,33 +368,40 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
******************************************************************************/
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
{
- u64 val;
+ u64 product;
+ u64 quotient;
/* Exit if either operand is zero */
*out_product = 0;
- if (!multiplicand || !multiplier) {
+ if (!multiplicand || !base) {
return (AE_OK);
}
- /* Check for 64-bit overflow before the actual multiplication */
-
- acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
- if (multiplicand > val) {
+ /*
+ * Check for 64-bit overflow before the actual multiplication.
+ *
+ * Notes: 64-bit division is often not supported on 32-bit platforms
+ * (it requires a library function), so ACPICA has a local
+ * 64-bit divide function. Also, the multiplier is currently only used
+ * as the radix (8/10/16), so the 64/32 divide will always work.
+ */
+ acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
+ if (multiplicand > quotient) {
return (AE_NUMERIC_OVERFLOW);
}
- val = multiplicand * multiplier;
+ product = multiplicand * base;
/* Check for 32-bit overflow if necessary */
- if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+ if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
return (AE_NUMERIC_OVERFLOW);
}
- *out_product = val;
+ *out_product = product;
return (AE_OK);
}
@@ -404,7 +410,7 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
* FUNCTION: acpi_ut_strtoul_add64
*
* PARAMETERS: addend1 - Current accumulated converted integer
- * addend2 - New hex value/char
+ * digit - New hex value/char
* out_sum - Where sum is returned (Accumulator)
*
* RETURN: Status and 64-bit sum
@@ -415,17 +421,17 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
*
******************************************************************************/
-static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
{
u64 sum;
/* Check for 64-bit overflow before the actual addition */
- if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+ if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
return (AE_NUMERIC_OVERFLOW);
}
- sum = addend1 + addend2;
+ sum = addend1 + digit;
/* Check for 32-bit overflow if necessary */
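
The guard pattern used by both helpers can be illustrated standalone (a sketch, assuming ordinary unsigned 64-bit arithmetic): the operation is rejected up front whenever the result would wrap.

	#include <stdint.h>
	#include <stdio.h>

	/* Reject base-multiplies that would exceed 64 bits, as above */
	static int mul_base_checked(uint64_t acc, uint32_t base, uint64_t *out)
	{
		if (base && acc > UINT64_MAX / base)
			return -1;		/* would overflow */
		*out = acc * base;
		return 0;
	}

	int main(void)
	{
		uint64_t v;

		if (mul_base_checked(UINT64_MAX / 16 + 1, 16, &v))
			printf("overflow rejected\n");
		return 0;
	}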
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 3c8de88ecbd5..633b4e2c669f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -402,8 +402,8 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
allocation->component = component;
allocation->line = line;
- strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
- allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+ acpi_ut_safe_strncpy(allocation->module, (char *)module,
+ ACPI_MAX_MODULE_NAME);
if (!element) {
@@ -717,7 +717,7 @@ exit:
if (!num_outstanding) {
ACPI_INFO(("No outstanding allocations"));
} else {
- ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+ ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
num_outstanding, num_outstanding));
}
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 950a1e500bfa..9da4f8ef2e77 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -96,8 +96,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
*
* RETURN: None
*
- * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
- * and decoded acpi_status.
+ * DESCRIPTION: Print an "ACPI Error" message with module/line/version
+ * info as well as decoded acpi_status.
*
******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
@@ -111,10 +111,10 @@ acpi_exception(const char *module_name,
/* For AE_OK, just print the message */
if (ACPI_SUCCESS(status)) {
- acpi_os_printf(ACPI_MSG_EXCEPTION);
+ acpi_os_printf(ACPI_MSG_ERROR);
} else {
- acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_os_printf(ACPI_MSG_ERROR "%s, ",
acpi_format_exception(status));
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 6402f7fad3bb..1efefe919555 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -410,7 +410,52 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
flags = 0;
if (flags != -1)
- memory_failure_queue(pfn, 0, flags);
+ memory_failure_queue(pfn, flags);
+#endif
+}
+
+/*
+ * PCIe AER errors need to be sent to the AER driver for reporting and
+ * recovery. The GHES severities map to the following AER severities and
+ * require the following handling:
+ *
+ * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
+ * These need to be reported by the AER driver but no recovery is
+ * necessary.
+ * GHES_SEV_RECOVERABLE -> AER_NONFATAL
+ * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
+ * These both need to be reported and recovered from by the AER driver.
+ * GHES_SEV_PANIC does not make it to this handling since the kernel must
+ * panic.
+ */
+static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
+
+ if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(gdata->error_severity);
+
+ /*
+ * If firmware reset the component to contain
+ * the error, we must reinitialize it before
+ * use, so treat it as a fatal AER error.
+ */
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+ pcie_err->aer_info);
+ }
#endif
}
@@ -441,38 +486,9 @@ static void ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
ghes_handle_memory_failure(gdata, sev);
}
-#ifdef CONFIG_ACPI_APEI_PCIEAER
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
- struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
-
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
- pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- unsigned int devfn;
- int aer_severity;
-
- devfn = PCI_DEVFN(pcie_err->device_id.device,
- pcie_err->device_id.function);
- aer_severity = cper_severity_to_aer(gdata->error_severity);
-
- /*
- * If firmware reset the component to contain
- * the error, we must reinitialize it before
- * use, so treat it as a fatal AER error.
- */
- if (gdata->flags & CPER_SEC_RESET)
- aer_severity = AER_FATAL;
-
- aer_recover_queue(pcie_err->device_id.segment,
- pcie_err->device_id.bus,
- devfn, aer_severity,
- (struct aer_capability_regs *)
- pcie_err->aer_info);
- }
-
+ ghes_handle_aer(gdata);
}
-#endif
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
@@ -870,7 +886,6 @@ static void ghes_print_queued_estatus(void)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -882,8 +897,6 @@ static void ghes_print_queued_estatus(void)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = cper_estatus_len(estatus);
- node_len = GHES_ESTATUS_NODE_LEN(len);
generic = estatus_node->generic;
ghes_print_estatus(NULL, generic, estatus);
llnode = llnode->next;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 13e7b56e33ae..19bc440820e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,6 +70,7 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
+static int battery_full_discharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -214,9 +215,12 @@ static int acpi_battery_get_property(struct power_supply *psy,
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+ if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
+ if (battery_full_discharging && battery->rate_now == 0)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1166,6 +1170,12 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
return 0;
}
+static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
+{
+ battery_full_discharging = 1;
+ return 0;
+}
+
static const struct dmi_system_id bat_dmi_table[] __initconst = {
{
.callback = battery_bix_broken_package_quirk,
@@ -1183,6 +1193,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
},
},
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS GL502VSK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
+ },
+ },
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS UX305LA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
+ },
+ },
{},
};
@@ -1237,13 +1263,11 @@ static int acpi_battery_add(struct acpi_device *device)
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
-#endif
if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
-#endif
goto fail;
}
+#endif
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 4d0979e02a28..f87ed3be779a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -785,6 +785,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
}
EXPORT_SYMBOL_GPL(acpi_match_device);
+void *acpi_get_match_data(const struct device *dev)
+{
+ const struct acpi_device_id *match;
+
+ if (!dev->driver)
+ return NULL;
+
+ if (!dev->driver->acpi_match_table)
+ return NULL;
+
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match)
+ return NULL;
+
+ return (void *)match->driver_data;
+}
+EXPORT_SYMBOL_GPL(acpi_get_match_data);
+
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
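
A hypothetical consumer of the acpi_get_match_data() helper added above (the driver name, HID, and info structure are illustrative): driver_data in the ACPI match table carries a pointer that probe() can now retrieve generically.

	#include <linux/acpi.h>
	#include <linux/platform_device.h>

	struct foo_info {
		unsigned int max_channels;
	};

	static const struct foo_info foo_abcd_info = { .max_channels = 4 };

	static const struct acpi_device_id foo_acpi_ids[] = {
		{ "ABCD0001", (kernel_ulong_t)&foo_abcd_info },
		{ }
	};
	MODULE_DEVICE_TABLE(acpi, foo_acpi_ids);

	static int foo_probe(struct platform_device *pdev)
	{
		/* Returns the matched entry's driver_data, or NULL */
		const struct foo_info *info = acpi_get_match_data(&pdev->dev);

		if (!info)
			return -ENODEV;
		dev_info(&pdev->dev, "channels: %u\n", info->max_channels);
		return 0;
	}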
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index bf8e4d371fa7..e1eee7a60fad 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -30,6 +30,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <acpi/button.h>
#define PREFIX "ACPI: "
@@ -76,6 +77,22 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
+/*
+ * Some devices that don't even have a lid nevertheless have a broken _LID
+ * method (e.g. pointing to a floating gpio pin) causing spurious LID events.
+ */
+static const struct dmi_system_id lid_blacklst[] = {
+ {
+ /* GP-electronic T701 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
+ {}
+};
+
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
@@ -210,6 +227,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
}
/* Send the platform triggered reliable event */
if (do_update) {
+ acpi_handle_debug(device->handle, "ACPI LID %s\n",
+ state ? "open" : "closed");
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
button->last_state = !!state;
@@ -473,6 +492,9 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
+ if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+ return -ENODEV;
+
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
if (!button)
return -ENOMEM;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a4c8ad98560d..c4d0a1c912f0 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
- if (dev->power.direct_complete && pm_resume_via_firmware())
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_suspend_noirq(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * acpi_subsys_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
- return pm_generic_suspend_noirq(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
*/
int acpi_subsys_resume_noirq(struct device *dev)
{
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0252c9b9af3d..d9f38c645e4a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return ret;
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7af789e..dd70d6c2bca0 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
return -ENOMEM;
}
- if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+ if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
goto error;
if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
&first_ec->global_lock))
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 46f060356a22..f13ba2c07667 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -49,6 +49,11 @@
#define MODULE_NAME "acpi-ged"
+struct acpi_ged_device {
+ struct device *dev;
+ struct list_head event_list;
+};
+
struct acpi_ged_event {
struct list_head node;
struct device *dev;
@@ -76,7 +81,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
unsigned int irq;
unsigned int gsi;
unsigned int irqflags = IRQF_ONESHOT;
- struct device *dev = context;
+ struct acpi_ged_device *geddev = context;
+ struct device *dev = geddev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_handle evt_handle;
struct resource r;
@@ -102,8 +108,6 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
return AE_ERROR;
}
- dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
-
event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
if (!event)
return AE_ERROR;
@@ -116,29 +120,58 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
if (r.flags & IORESOURCE_IRQ_SHAREABLE)
irqflags |= IRQF_SHARED;
- if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
- irqflags, "ACPI:Ged", event)) {
+ if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
+ irqflags, "ACPI:Ged", event)) {
dev_err(dev, "failed to setup event handler for irq %u\n", irq);
return AE_ERROR;
}
+ dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+ list_add_tail(&event->node, &geddev->event_list);
return AE_OK;
}
static int ged_probe(struct platform_device *pdev)
{
+ struct acpi_ged_device *geddev;
acpi_status acpi_ret;
+ geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+ if (!geddev)
+ return -ENOMEM;
+
+ geddev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&geddev->event_list);
acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
- acpi_ged_request_interrupt, &pdev->dev);
+ acpi_ged_request_interrupt, geddev);
if (ACPI_FAILURE(acpi_ret)) {
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+ platform_set_drvdata(pdev, geddev);
return 0;
}
+static void ged_shutdown(struct platform_device *pdev)
+{
+ struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ free_irq(event->irq, event);
+ list_del(&event->node);
+ dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
+ event->gsi, event->irq);
+ }
+}
+
+static int ged_remove(struct platform_device *pdev)
+{
+ ged_shutdown(pdev);
+ return 0;
+}
+
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
@@ -146,6 +179,8 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
+ .remove = ged_remove,
+ .shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7f43423de43c..1d0a501bc7f0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- unsigned long gpe;
+ u32 gpe;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 917f1cc0fda4..8ccaae3550d2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -460,8 +460,7 @@ int __init acpi_numa_init(void)
srat_proc, ARRAY_SIZE(srat_proc), 0);
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_memory_affinity, 0);
}
/* SLIT: System Locality Information Table */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index bc3d914dfc3e..85ad679390e3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -612,7 +612,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_USING;
- printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+ pr_info("%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
}
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index 90011aad4d20..886ac8b93cd0 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -400,7 +400,7 @@ static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
&intel_bxtwc_pmic_opregion_data);
}
-static struct platform_device_id bxt_wc_opregion_id_table[] = {
+static const struct platform_device_id bxt_wc_opregion_id_table[] = {
{ .name = "bxt_wcove_region" },
{},
};
@@ -412,9 +412,4 @@ static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
},
.id_table = bxt_wc_opregion_id_table,
};
-
-static int __init intel_bxtwc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
-}
-device_initcall(intel_bxtwc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 109c1e9c9c7a..f6d73a243d80 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -131,7 +131,4 @@ static struct platform_driver chtdc_ti_pmic_opregion_driver = {
},
.id_table = chtdc_ti_pmic_opregion_id_table,
};
-module_platform_driver(chtdc_ti_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 85636d7a9d39..9912422c8185 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -260,11 +260,10 @@ static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev)
&intel_cht_wc_pmic_opregion_data);
}
-static struct platform_device_id cht_wc_opregion_id_table[] = {
+static const struct platform_device_id cht_wc_opregion_id_table[] = {
{ .name = "cht_wcove_region" },
{},
};
-MODULE_DEVICE_TABLE(platform, cht_wc_opregion_id_table);
static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
.probe = intel_cht_wc_pmic_opregion_probe,
@@ -273,8 +272,4 @@ static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
},
.id_table = cht_wc_opregion_id_table,
};
-module_platform_driver(intel_cht_wc_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC operation region driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index d7f1761ab1bc..7ffa74048107 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -201,9 +201,4 @@ static struct platform_driver intel_crc_pmic_opregion_driver = {
.name = "crystal_cove_pmic",
},
};
-
-static int __init intel_crc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_crc_pmic_opregion_driver);
-}
-device_initcall(intel_crc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6c99d3f81095..316e55174aa9 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -278,9 +278,4 @@ static struct platform_driver intel_xpower_pmic_opregion_driver = {
.name = "axp288_pmic_acpi",
},
};
-
-static int __init intel_xpower_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_xpower_pmic_opregion_driver);
-}
-device_initcall(intel_xpower_pmic_opregion_driver_init);
+builtin_platform_driver(intel_xpower_pmic_opregion_driver);
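
For reference, builtin_platform_driver() emits essentially the boilerplate these drivers previously spelled out by hand; its expansion is roughly equivalent to the following (paraphrased from include/linux/platform_device.h):

	/* builtin_platform_driver(my_driver) expands to approximately: */
	static int __init my_driver_init(void)
	{
		return platform_driver_register(&my_driver);
	}
	device_initcall(my_driver_init);

The practical difference from the removed module_platform_driver() users is that the builtin variant provides no exit path, which matches drivers that can only be built in.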
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d50a7b6ccddd..5f0071c7e2e1 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -207,6 +207,7 @@ static void tsc_check_state(int state)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e26ea209b63e..466d1503aba0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,9 +1271,17 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return acpi_get_match_data(dev);
+}
+
#define DECLARE_ACPI_FWNODE_OPS(ops) \
const struct fwnode_operations ops = { \
.device_is_available = acpi_fwnode_device_is_available, \
+ .device_get_match_data = acpi_fwnode_device_get_match_data, \
.property_present = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8082871b409a..46cde0912762 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -367,10 +367,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{},
};
+static bool ignore_blacklist;
+
+void __init acpi_sleep_no_blacklist(void)
+{
+ ignore_blacklist = true;
+}
+
static void __init acpi_sleep_dmi_check(void)
{
int year;
+ if (ignore_blacklist)
+ return;
+
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
acpi_nvs_nosave_s3();
@@ -697,7 +707,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_S2IDLE_FUNC_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
+#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
+#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
@@ -900,7 +911,8 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
+ if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
+ (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 06a150bb35bf..4fc59c3bc673 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -816,14 +816,8 @@ end:
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
-
-/*
- * Currently, the GPE flooding prevention only supports to mask the GPEs
- * numbered from 00 to 7f.
- */
-#define ACPI_MASKABLE_GPE_MAX 0x80
-
-static u64 __initdata acpi_masked_gpes;
+#define ACPI_MASKABLE_GPE_MAX 0xFF
+static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
@@ -831,7 +825,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
	if (kstrtou8(val, 0, &gpe) || gpe >= ACPI_MASKABLE_GPE_MAX)
return -EINVAL;
- acpi_masked_gpes |= ((u64)1<<gpe);
+ set_bit(gpe, acpi_masked_gpes_map);
return 1;
}
@@ -843,15 +837,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
acpi_status status;
u8 gpe;
- for (gpe = 0;
- gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
- gpe++) {
- if (acpi_masked_gpes & ((u64)1<<gpe)) {
- status = acpi_get_gpe_device(gpe, &handle);
- if (ACPI_SUCCESS(status)) {
- pr_info("Masking GPE 0x%x.\n", gpe);
- (void)acpi_mask_gpe(handle, gpe, TRUE);
- }
+ for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
}
}
}
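
With the bitmap in place, GPEs above 0x7f can now be masked as well; the kernel parameter may be given more than once (GPE numbers here are illustrative):

	acpi_mask_gpe=0x6d acpi_mask_gpe=0xce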
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 9d49a1acebe3..78db97687f26 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -737,16 +737,17 @@ bool acpi_dev_found(const char *hid)
}
EXPORT_SYMBOL(acpi_dev_found);
-struct acpi_dev_present_info {
+struct acpi_dev_match_info {
+ const char *dev_name;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
};
-static int acpi_dev_present_cb(struct device *dev, void *data)
+static int acpi_dev_match_cb(struct device *dev, void *data)
{
struct acpi_device *adev = to_acpi_device(dev);
- struct acpi_dev_present_info *match = data;
+ struct acpi_dev_match_info *match = data;
unsigned long long hrv;
acpi_status status;
@@ -757,6 +758,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
strcmp(adev->pnp.unique_id, match->uid)))
return 0;
+ match->dev_name = acpi_dev_name(adev);
+
if (match->hrv == -1)
return 1;
@@ -789,20 +792,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
*/
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
{
- struct acpi_dev_present_info match = {};
+ struct acpi_dev_match_info match = {};
struct device *dev;
strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
- dev = bus_find_device(&acpi_bus_type, NULL, &match,
- acpi_dev_present_cb);
-
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
+/**
+ * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return device name if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ struct acpi_dev_match_info match = {};
+ struct device *dev;
+
+ strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ match.uid = uid;
+ match.hrv = hrv;
+
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ return dev ? match.dev_name : NULL;
+}
+EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+
/*
 * acpi_backlight= handling, this is done here rather than in video_detect.c
* because __setup cannot be used in modules.
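
A hypothetical call site for the new acpi_dev_get_first_match_name() helper (the HID value is illustrative):

	const char *name;

	/* First device matching this HID, ignoring _UID and _HRV */
	name = acpi_dev_get_first_match_name("ABCD0001", NULL, -1);
	if (name)
		pr_info("found ACPI device %s\n", name);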
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a7ecfde66b7b..cc89d0d2b965 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4311,7 +4311,7 @@ static int binder_thread_release(struct binder_proc *proc,
return active_transactions;
}
-static unsigned int binder_poll(struct file *filp,
+static __poll_t binder_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct binder_proc *proc = filp->private_data;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cb5339166563..a7120d621154 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -92,6 +92,25 @@ config SATA_AHCI
If unsure, say N.
+config SATA_MOBILE_LPM_POLICY
+ int "Default SATA Link Power Management policy for mobile chipsets"
+ range 0 4
+ default 0
+ depends on SATA_AHCI
+ help
+	  Select the default SATA Link Power Management (LPM) policy to use
+ for mobile / laptop variants of chipsets / "South Bridges".
+
+ The value set has the following meanings:
+ 0 => Keep firmware settings
+ 1 => Maximum performance
+ 2 => Medium power
+ 3 => Medium power with Device Initiated PM enabled
+ 4 => Minimum power
+
+ Note "Minimum power" is known to cause issues, including disk
+ corruption, with some disks and should not be used.
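
As an example, a laptop-oriented build that wants medium power with device initiated PM by default would set (standard Kconfig assignment syntax):

	CONFIG_SATA_MOBILE_LPM_POLICY=3

The build-time default only applies to chipsets flagged as mobile, and it can still be overridden at runtime through the ahci.mobile_lpm_policy module parameter added later in this patch.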
+
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
help
@@ -925,15 +944,6 @@ endif # ATA_BMDMA
comment "PIO-only SFF controllers"
-config PATA_AT32
- tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP
- help
- This option enables support for the IDE devices on the
- Atmel AT32AP platform.
-
- If unsure, say N.
-
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8daec3e657f8..f1f5a3fbc777 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
# SFF PIO only
-obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
obj-$(CONFIG_PATA_FALCON) += pata_falcon.o
obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5443cb71d7ba..355a95a83a34 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -65,6 +65,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_mobile,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
@@ -140,6 +141,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_mobile] = {
+ AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
@@ -252,13 +260,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
@@ -268,9 +276,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
@@ -293,9 +301,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -304,28 +312,28 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
@@ -353,26 +361,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
@@ -386,6 +394,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -593,6 +606,9 @@ static int marvell_enable = 1;
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
+static int mobile_lpm_policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+module_param(mobile_lpm_policy, int, 0644);
+MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
@@ -1728,6 +1744,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
+ if ((hpriv->flags & AHCI_HFLAG_IS_MOBILE) &&
+ mobile_lpm_policy >= ATA_LPM_UNKNOWN &&
+ mobile_lpm_policy <= ATA_LPM_MIN_POWER)
+ ap->target_lpm_policy = mobile_lpm_policy;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 749fd94441b0..a9d996e17d75 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -251,6 +251,9 @@ enum {
AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
+ AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
+ SATA_MOBILE_LPM_POLICY
+ as default lpm_policy */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 5936d1679bf3..ea430819c80b 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -70,6 +70,13 @@
(DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \
(MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
+#define BUS_CTRL_ENDIAN_NSP_CONF \
+ (0x02 << DMADATA_ENDIAN_SHIFT | 0x02 << DMADESC_ENDIAN_SHIFT)
+
+#define BUS_CTRL_ENDIAN_CONF_MASK \
+ (0x3 << MMIO_ENDIAN_SHIFT | 0x3 << DMADESC_ENDIAN_SHIFT | \
+ 0x3 << DMADATA_ENDIAN_SHIFT | 0x3 << PIODATA_ENDIAN_SHIFT)
+
enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
@@ -89,14 +96,6 @@ struct brcm_ahci_priv {
enum brcm_ahci_version version;
};
-static const struct ata_port_info ahci_brcm_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
- .link_flags = ATA_LFLAG_NO_DB_DELAY,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
-};
-
static inline u32 brcm_sata_readreg(void __iomem *addr)
{
/*
@@ -250,20 +249,105 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
static void brcm_sata_init(struct brcm_ahci_priv *priv)
{
void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
+ u32 data;
/* Configure endianness */
- if (priv->version == BRCM_SATA_NSP) {
- u32 data = brcm_sata_readreg(ctrl);
-
- data &= ~((0x03 << DMADATA_ENDIAN_SHIFT) |
- (0x03 << DMADESC_ENDIAN_SHIFT));
- data |= (0x02 << DMADATA_ENDIAN_SHIFT) |
- (0x02 << DMADESC_ENDIAN_SHIFT);
- brcm_sata_writereg(data, ctrl);
- } else
- brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, ctrl);
+ data = brcm_sata_readreg(ctrl);
+ data &= ~BUS_CTRL_ENDIAN_CONF_MASK;
+ if (priv->version == BRCM_SATA_NSP)
+ data |= BUS_CTRL_ENDIAN_NSP_CONF;
+ else
+ data |= BUS_CTRL_ENDIAN_CONF;
+ brcm_sata_writereg(data, ctrl);
+}
+
+static unsigned int brcm_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_host *host = ap->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+ void __iomem *mmio = hpriv->mmio;
+ unsigned int err_mask;
+ unsigned long flags;
+ int i, rc;
+ u32 ctl;
+
+ /* Try to read the device ID and, if this fails, proceed with the
+ * recovery sequence below
+ */
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (likely(!err_mask))
+ return err_mask;
+
+ /* Disable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Perform the SATA PHY reset sequence */
+ brcm_sata_phy_disable(priv, ap->port_no);
+
+ /* Bring the PHY back on */
+ brcm_sata_phy_enable(priv, ap->port_no);
+
+ /* Re-initialize and calibrate the PHY */
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_phys;
+
+ rc = phy_calibrate(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ /* Re-enable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl |= HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ata_do_dev_read_id(dev, tf, id);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+ return AC_ERR_OTHER;
+}
+
+static void brcm_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ ahci_platform_disable_resources(hpriv);
}
+static struct ata_port_operations ahci_brcm_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = brcm_ahci_host_stop,
+ .read_id = brcm_ahci_read_id,
+};
+
+static const struct ata_port_info ahci_brcm_port_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+ .link_flags = ATA_LFLAG_NO_DB_DELAY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_brcm_platform_ops,
+};
+
#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
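The recovery path above tears the port PHYs down and brings them back up, unwinding only the PHYs that were fully set up when a later one fails. A condensed sketch of that unwind idiom (illustrative; init_phys() is an assumption, while phy_init(), phy_calibrate(), phy_exit() and phy_power_off() are the generic PHY API calls used by the patch):

    #include <linux/phy/phy.h>

    static int init_phys(struct phy **phys, int nports)
    {
        int i, rc;

        for (i = 0; i < nports; i++) {
            rc = phy_init(phys[i]);
            if (rc)
                goto err;

            rc = phy_calibrate(phys[i]);
            if (rc) {
                phy_exit(phys[i]);      /* undo this slot first */
                goto err;
            }
        }
        return 0;

    err:
        while (--i >= 0) {              /* unwind fully set-up slots only */
            phy_power_off(phys[i]);
            phy_exit(phys[i]);
        }
        return rc;
    }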
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b702c20fbc2b..7ecb1322a514 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -458,7 +458,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_2port_sata_byt] = &ich8_2port_map_db,
};
-static struct pci_bits piix_enable_bits[] = {
+static const struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
deleted file mode 100644
index 9aeb7a6dd4d4..000000000000
--- a/drivers/ata/pata_at32.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * AVR32 SMC/CFC PATA Driver
- *
- * Copyright (C) 2007 Atmel Norway
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#define DEBUG
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <scsi/scsi_host.h>
-#include <linux/ata.h>
-#include <linux/libata.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <mach/board.h>
-#include <mach/smc.h>
-
-#define DRV_NAME "pata_at32"
-#define DRV_VERSION "0.0.3"
-
-/*
- * CompactFlash controller memory layout relative to the base address:
- *
- * Attribute memory: 0000 0000 -> 003f ffff
- * Common memory: 0040 0000 -> 007f ffff
- * I/O memory: 0080 0000 -> 00bf ffff
- * True IDE Mode: 00c0 0000 -> 00df ffff
- * Alt IDE Mode: 00e0 0000 -> 00ff ffff
- *
- * Only True IDE and Alt True IDE mode are needed for this driver.
- *
- * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
- * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
- */
-#define CF_IDE_OFFSET 0x00c00000
-#define CF_ALT_IDE_OFFSET 0x00e00000
-#define CF_RES_SIZE 2048
-
-/*
- * Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA
- * adaptor with a logic analyzer or similar.
- */
-#undef DEBUG_BUS
-
-/*
- * ATA PIO modes
- *
- * Name | Mb/s | Min cycle time | Mask
- * --------+-------+----------------+--------
- * Mode 0 | 3.3 | 600 ns | 0x01
- * Mode 1 | 5.2 | 383 ns | 0x03
- * Mode 2 | 8.3 | 240 ns | 0x07
- * Mode 3 | 11.1 | 180 ns | 0x0f
- * Mode 4 | 16.7 | 120 ns | 0x1f
- *
- * Alter PIO_MASK below according to table to set maximal PIO mode.
- */
-enum {
- PIO_MASK = ATA_PIO4,
-};
-
-/*
- * Struct containing private information about device.
- */
-struct at32_ide_info {
- unsigned int irq;
- struct resource res_ide;
- struct resource res_alt;
- void __iomem *ide_addr;
- void __iomem *alt_addr;
- unsigned int cs;
- struct smc_config smc;
-};
-
-/*
- * Setup SMC for the given ATA timing.
- */
-static int pata_at32_setup_timing(struct device *dev,
- struct at32_ide_info *info,
- const struct ata_timing *ata)
-{
- struct smc_config *smc = &info->smc;
- struct smc_timing timing;
-
- int active;
- int recover;
-
- memset(&timing, 0, sizeof(struct smc_timing));
-
- /* Total cycle time */
- timing.read_cycle = ata->cyc8b;
-
- /* DIOR <= CFIOR timings */
- timing.nrd_setup = ata->setup;
- timing.nrd_pulse = ata->act8b;
- timing.nrd_recover = ata->rec8b;
-
- /* Convert nanosecond timing to clock cycles */
- smc_set_timing(smc, &timing);
-
- /* Add one extra cycle setup due to signal ring */
- smc->nrd_setup = smc->nrd_setup + 1;
-
- active = smc->nrd_setup + smc->nrd_pulse;
- recover = smc->read_cycle - active;
-
- /* Need at least two cycles recovery */
- if (recover < 2)
- smc->read_cycle = active + 2;
-
- /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
- smc->ncs_read_setup = 1;
- smc->ncs_read_pulse = smc->read_cycle - 2;
-
- /* Write timings same as read timings */
- smc->write_cycle = smc->read_cycle;
- smc->nwe_setup = smc->nrd_setup;
- smc->nwe_pulse = smc->nrd_pulse;
- smc->ncs_write_setup = smc->ncs_read_setup;
- smc->ncs_write_pulse = smc->ncs_read_pulse;
-
- /* Do some debugging output of ATA and SMC timings */
- dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n",
- ata->cyc8b, ata->setup, ata->act8b, ata->rec8b);
-
- dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n",
- smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
- smc->ncs_read_setup, smc->ncs_read_pulse);
-
- /* Finally, configure the SMC */
- return smc_set_configuration(info->cs, smc);
-}
-
-/*
- * Procedures for libATA.
- */
-static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing timing;
- struct at32_ide_info *info = ap->host->private_data;
-
- int ret;
-
- /* Compute ATA timing */
- ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
- if (ret) {
- dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
- return;
- }
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(ap->dev, info, &timing);
- if (ret) {
- dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
- return;
- }
-}
-
-static struct scsi_host_template at32_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations at32_port_ops = {
- .inherits = &ata_sff_port_ops,
- .cable_detect = ata_cable_40wire,
- .set_piomode = pata_at32_set_piomode,
-};
-
-static int __init pata_at32_init_one(struct device *dev,
- struct at32_ide_info *info)
-{
- struct ata_host *host;
- struct ata_port *ap;
-
- host = ata_host_alloc(dev, 1);
- if (!host)
- return -ENOMEM;
-
- ap = host->ports[0];
-
- /* Setup ATA bindings */
- ap->ops = &at32_port_ops;
- ap->pio_mask = PIO_MASK;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
-
- /*
- * Since all 8-bit taskfile transfers has to go on the lower
- * byte of the data bus and there is a bug in the SMC that
- * makes it impossible to alter the bus width during runtime,
- * we need to hardwire the address signals as follows:
- *
- * A_IDE(2:0) <= A_EBI(3:1)
- *
- * This makes all addresses on the EBI even, thus all data
- * will be on the lower byte of the data bus. All addresses
- * used by libATA need to be altered according to this.
- */
- ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
- ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1);
-
- ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1);
- ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1);
- ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1);
- ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1);
- ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1);
- ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1);
- ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1);
- ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1);
- ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1);
- ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1);
-
- /* Set info as private data of ATA host */
- host->private_data = info;
-
- /* Register ATA device and return */
- return ata_host_activate(host, info->irq, ata_sff_interrupt,
- IRQF_SHARED | IRQF_TRIGGER_RISING,
- &at32_sht);
-}
-
-/*
- * This function may come in handy for people analyzing their own
- * EBI -> PATA adaptors.
- */
-#ifdef DEBUG_BUS
-
-static void __init pata_at32_debug_bus(struct device *dev,
- struct at32_ide_info *info)
-{
- const int d1 = 0xff;
- const int d2 = 0x00;
-
- int i;
-
- /* Write 8-bit values (registers) */
- iowrite8(d1, info->alt_addr + (0x06 << 1));
- iowrite8(d2, info->alt_addr + (0x06 << 1));
-
- for (i = 0; i < 8; i++) {
- iowrite8(d1, info->ide_addr + (i << 1));
- iowrite8(d2, info->ide_addr + (i << 1));
- }
-
- /* Write 16 bit values (data) */
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-}
-
-#endif
-
-static int __init pata_at32_probe(struct platform_device *pdev)
-{
- const struct ata_timing initial_timing =
- {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
-
- struct device *dev = &pdev->dev;
- struct at32_ide_info *info;
- struct ide_platform_data *board = dev_get_platdata(&pdev->dev);
- struct resource *res;
-
- int irq;
- int ret;
-
- if (!board)
- return -ENXIO;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- /* Retrieve IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- /* Setup struct containing private information */
- info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->irq = irq;
- info->cs = board->cs;
-
- /* Request memory resources */
- info->res_ide.start = res->start + CF_IDE_OFFSET;
- info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
- info->res_ide.name = "ide";
- info->res_ide.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_ide);
- if (ret)
- goto err_req_res_ide;
-
- info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
- info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
- info->res_alt.name = "alt";
- info->res_alt.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_alt);
- if (ret)
- goto err_req_res_alt;
-
- /* Setup non-timing elements of SMC */
- info->smc.bus_width = 2; /* 16 bit data bus */
- info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
- info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
- info->smc.nwait_mode = 3; /* NWAIT is in READY mode */
- info->smc.byte_write = 0; /* Byte select access type */
- info->smc.tdf_mode = 0; /* TDF optimization disabled */
- info->smc.tdf_cycles = 0; /* No TDF wait cycles */
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(dev, info, &initial_timing);
- if (ret)
- goto err_setup_timing;
-
- /* Map ATA address space */
- ret = -ENOMEM;
- info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
- info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
- if (!info->ide_addr || !info->alt_addr)
- goto err_ioremap;
-
-#ifdef DEBUG_BUS
- pata_at32_debug_bus(dev, info);
-#endif
-
- /* Setup and register ATA device */
- ret = pata_at32_init_one(dev, info);
- if (ret)
- goto err_ata_device;
-
- return 0;
-
- err_ata_device:
- err_ioremap:
- err_setup_timing:
- release_resource(&info->res_alt);
- err_req_res_alt:
- release_resource(&info->res_ide);
- err_req_res_ide:
- kfree(info);
-
- return ret;
-}
-
-static int __exit pata_at32_remove(struct platform_device *pdev)
-{
- struct ata_host *host = platform_get_drvdata(pdev);
- struct at32_ide_info *info;
-
- if (!host)
- return 0;
-
- info = host->private_data;
- ata_host_detach(host);
-
- if (!info)
- return 0;
-
- release_resource(&info->res_ide);
- release_resource(&info->res_alt);
-
- kfree(info);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:at32_ide");
-
-static struct platform_driver pata_at32_driver = {
- .remove = __exit_p(pata_at32_remove),
- .driver = {
- .name = "at32_ide",
- },
-};
-
-module_platform_driver_probe(pata_at32_driver, pata_at32_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
-MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 49d705c9f0f7..4d49fd3c927b 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,6 +278,10 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &info };
+ /* SB600 doesn't have secondary port wired */
+ if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
+ ppi[1] = &ata_dummy_port_info;
+
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
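ata_dummy_port_info makes libata register the slot as a dummy port so it is never probed. Should more single-port variants turn up, the quirk scales naturally to an ID table; a sketch (the table name is an assumption, pci_match_id() and PCI_VDEVICE() are real helpers):

    static const struct pci_device_id atiixp_single_port_ids[] = {
        { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE) },
        { }     /* terminator */
    };

    /* in atiixp_init_one(): */
    if (pci_match_id(atiixp_single_port_ids, pdev))
        ppi[1] = &ata_dummy_port_info;  /* secondary port not wired */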
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 7a21edf89e72..8468b300193b 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -683,7 +683,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
return (u8 *)buf;
}
- mdelay(1);
+ usleep_range(500, 1000);
}
kfree(buf);
printk(KERN_ERR "it821x_firmware_command: timeout\n");
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6db2e34bd52f..1a18e675ba9f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -580,7 +580,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
ioread16(mmio_base + PDC_PLL_CTL); /* flush */
/* Wait the PLL circuit to be stable */
- mdelay(30);
+ msleep(30);
#ifdef PDC_DEBUG
/*
@@ -620,7 +620,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
start_time = ktime_get();
/* Let the counter run for 100 ms. */
- mdelay(100);
+ msleep(100);
/* Read the counter values again */
end_count = pdc_read_counter(host);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index cc208b72b199..42d4589b43d4 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3596,7 +3596,7 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
hpriv->ops->phy_errata(hpriv, mmio, port_no);
if (IS_GEN_I(hpriv))
- mdelay(1);
+ usleep_range(500, 1000);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
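The mdelay() conversions in this series all follow the rule of thumb from Documentation/timers/timers-howto.txt: mdelay() busy-waits and is only justified in atomic context, usleep_range() suits sleepable waits between roughly 10 us and 20 ms, and msleep() suits anything longer. Schematically (illustrative):

    mdelay(1);               /* busy-wait: atomic context only */
    usleep_range(500, 1000); /* sleepable, 10 us to 20 ms; the range
                              * lets the hrtimer coalesce wakeups */
    msleep(30);              /* sleepable, >= 20 ms; jiffies resolution
                              * is accurate enough at this scale */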
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 1d60b58a8c19..fe4b24f05f6a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -569,7 +569,7 @@ store_hard_offline_page(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, 0, 0);
+ ret = memory_failure(pfn, 0);
return ret ? ret : count;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0c80bea05bcb..528b24149bc7 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev)
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
struct generic_pm_domain *genpd;
- int ret;
+ int ret = 0;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
-
if (poweroff)
ret = pm_generic_poweroff_noirq(dev);
else
@@ -1048,10 +1045,19 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_suspend(dev);
- if (ret)
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+ return 0;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ if (poweroff)
+ pm_generic_restore_noirq(dev);
+ else
+ pm_generic_resume_noirq(dev);
return ret;
+ }
}
genpd_lock(genpd);
@@ -1085,7 +1091,7 @@ static int genpd_suspend_noirq(struct device *dev)
static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret = 0;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -1094,21 +1100,21 @@ static int genpd_resume_noirq(struct device *dev)
return -EINVAL;
if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
+ return pm_generic_resume_noirq(dev);
genpd_lock(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_resume(dev);
-
- ret = pm_generic_resume_noirq(dev);
- if (ret)
- return ret;
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return pm_generic_resume_noirq(dev);
}
/**
@@ -1135,8 +1141,9 @@ static int genpd_freeze_noirq(struct device *dev)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_suspend(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev))
+ ret = genpd_stop_dev(genpd, dev);
return ret;
}
@@ -1159,8 +1166,9 @@ static int genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -1217,8 +1225,9 @@ static int genpd_restore_noirq(struct device *dev)
genpd_sync_power_on(genpd, true, 0);
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -2199,20 +2208,8 @@ int genpd_dev_pm_attach(struct device *dev)
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", 0, &pd_args);
- if (ret < 0) {
- if (ret != -ENOENT)
- return ret;
-
- /*
- * Try legacy Samsung-specific bindings
- * (for backwards compatibility of DT ABI)
- */
- pd_args.args_count = 0;
- pd_args.np = of_parse_phandle(dev->of_node,
- "samsung,power-domain", 0);
- if (!pd_args.np)
- return -ENOENT;
- }
+ if (ret < 0)
+ return ret;
mutex_lock(&gpd_list_lock);
pd = genpd_get_from_provider(&pd_args);
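With pm_runtime_force_suspend()/resume() gone from these paths, genpd now drives the provider's ->stop()/->start() hooks directly, and only for devices that runtime PM has not already stopped, which keeps the stop/start state balanced across suspend and resume. A sketch of a provider supplying those hooks (hypothetical; my_pd and the clock handle are assumptions, gpd_dev_ops.stop/start are the real fields):

    #include <linux/clk.h>
    #include <linux/pm_domain.h>

    static struct clk *my_pd_clk;       /* functional clock (assumption) */

    static int my_pd_stop(struct device *dev)
    {
        clk_disable(my_pd_clk);         /* gate the device's clock */
        return 0;
    }

    static int my_pd_start(struct device *dev)
    {
        return clk_enable(my_pd_clk);
    }

    static struct generic_pm_domain my_pd = {
        .name = "my_pd",
        .dev_ops = {
            .stop  = my_pd_stop,
            .start = my_pd_start,
        },
    };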
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 08744b572af6..02a497e7c785 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -541,6 +540,73 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
}
/**
+ * suspend_event - Return a "suspend" message for given "resume" one.
+ * @resume_msg: PM message representing a system-wide resume transition.
+ */
+static pm_message_t suspend_event(pm_message_t resume_msg)
+{
+ switch (resume_msg.event) {
+ case PM_EVENT_RESUME:
+ return PMSG_SUSPEND;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RESTORE:
+ return PMSG_FREEZE;
+ case PM_EVENT_RECOVER:
+ return PMSG_HIBERNATE;
+ }
+ return PMSG_ON;
+}
+
+/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+ return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -551,8 +617,9 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool skip_resume;
int error = 0;
TRACE_DEVICE(dev);
@@ -566,29 +633,61 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
+ skip_resume = dev_pm_may_skip_resume(dev);
+
+ callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
+
+ if (skip_resume)
+ goto Skip;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ pm_message_t suspend_msg = suspend_event(state);
+
+ /*
+ * If "freeze" callbacks have been skipped during a transition
+ * related to hibernation, the subsequent "thaw" callbacks must
+ * be skipped too or bad things may happen. Otherwise, resume
+ * callbacks are going to be run for the device, so its runtime
+ * PM status must be changed to reflect the new state after the
+ * transition under way.
+ */
+ if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
+ !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
+ if (state.event == PM_EVENT_THAW) {
+ skip_resume = true;
+ goto Skip;
+ } else {
+ pm_runtime_set_active(dev);
+ }
+ }
}
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_noirq_suspended = false;
- Out:
+ if (skip_resume) {
+ /*
+ * The device is going to be left in suspend, but it might not
+ * have been in runtime suspend before the system suspended, so
+ * its runtime PM status needs to be updated to avoid confusing
+ * the runtime PM framework when runtime PM is enabled for the
+ * device again.
+ */
+ pm_runtime_set_suspended(dev);
+ dev_pm_skip_next_resume_phases(dev);
+ }
+
+Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
@@ -681,6 +780,35 @@ void dpm_resume_noirq(pm_message_t state)
dpm_noirq_end();
}
+static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -691,8 +819,8 @@ void dpm_resume_noirq(pm_message_t state)
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -706,19 +834,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_resume_early_cb(dev, state, &info);
if (!callback && dev->driver && dev->driver->pm) {
info = "early driver ";
@@ -1089,6 +1205,77 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static bool device_must_resume(struct device *dev, pm_message_t state,
+ bool no_subsys_suspend_noirq)
+{
+ pm_message_t resume_msg = resume_event(state);
+
+ /*
+ * If all of the device driver's "noirq", "late" and "early" callbacks
+ * are invoked directly by the core, the decision to allow the device to
+ * stay in suspend can be based on its current runtime PM status and its
+ * wakeup settings.
+ */
+ if (no_subsys_suspend_noirq &&
+ !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
+ !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
+ !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
+ return !pm_runtime_status_suspended(dev) &&
+ (resume_msg.event != PM_EVENT_RESUME ||
+ (device_can_wakeup(dev) && !device_may_wakeup(dev)));
+
+ /*
+ * The only safe strategy here is to require that if the device may not
+ * be left in suspend, resume callbacks must be invoked for it.
+ */
+ return !dev->power.may_skip_resume;
+}
+
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1100,8 +1287,9 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool no_subsys_cb = false;
int error = 0;
TRACE_DEVICE(dev);
@@ -1120,30 +1308,40 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
+
+ if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_noirq_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+
+Skip:
+ dev->power.is_noirq_suspended = true;
+
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+ dev->power.must_resume = dev->power.must_resume ||
+ atomic_read(&dev->power.usage_count) > 1 ||
+ device_must_resume(dev, state, no_subsys_cb);
+ } else {
+ dev->power.must_resume = true;
+ }
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
@@ -1249,6 +1447,50 @@ int dpm_suspend_noirq(pm_message_t state)
return ret;
}
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ if (dev->power.wakeup_path && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1259,8 +1501,8 @@ int dpm_suspend_noirq(pm_message_t state)
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -1281,30 +1523,29 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev_pm_smart_suspend_and_suspended(dev) &&
+ !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_late_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+ dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+ dev->power.is_late_suspended = true;
Complete:
TRACE_SUSPEND(error);
@@ -1435,11 +1676,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
-static void dpm_clear_suppliers_direct_complete(struct device *dev)
+static void dpm_clear_superiors_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
+ if (dev->parent) {
+ spin_lock_irq(&dev->parent->power.lock);
+ dev->parent->power.direct_complete = false;
+ spin_unlock_irq(&dev->parent->power.lock);
+ }
+
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
@@ -1500,6 +1747,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
+ dev->power.may_skip_resume = false;
+ dev->power.must_resume = false;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1543,20 +1793,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
- struct device *parent = dev->parent;
-
dev->power.is_suspended = true;
- if (parent) {
- spin_lock_irq(&parent->power.lock);
-
- dev->parent->power.direct_complete = false;
- if (dev->power.wakeup_path
- && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
+ if (device_may_wakeup(dev))
+ dev->power.wakeup_path = true;
- spin_unlock_irq(&parent->power.lock);
- }
- dpm_clear_suppliers_direct_complete(dev);
+ dpm_propagate_wakeup_to_parent(dev);
+ dpm_clear_superiors_direct_complete(dev);
}
device_unlock(dev);
@@ -1665,8 +1907,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- !pm_runtime_enabled(dev));
+ WARN_ON(!pm_runtime_enabled(dev) &&
+ dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED));
/*
* If a device's parent goes into runtime suspend at the wrong time,
@@ -1678,7 +1921,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- dev->power.wakeup_path = device_may_wakeup(dev);
+ dev->power.wakeup_path = false;
if (dev->power.no_pm_callbacks) {
ret = 1; /* Let device go direct_complete */
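All of the may_skip_resume/must_resume machinery above is opt-in: a device is only left suspended if its driver sets DPM_FLAG_LEAVE_SUSPENDED. A sketch of the opt-in from a driver's probe path (the probe function is hypothetical; dev_pm_set_driver_flags() and both flags are real):

    #include <linux/pci.h>
    #include <linux/pm.h>

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
        /* Let the PM core reuse the runtime PM state during system
         * suspend and leave the device suspended over system resume
         * when nothing requires it to be up.
         */
        dev_pm_set_driver_flags(&pdev->dev,
                                DPM_FLAG_SMART_SUSPEND |
                                DPM_FLAG_LEAVE_SUSPENDED);
        return 0;
    }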
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 7beee75399d4..21244c53e377 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -41,20 +41,15 @@ extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
-extern int device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq);
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
extern void device_wakeup_detach_irq(struct device *dev);
extern void device_wakeup_arm_wake_irqs(void);
extern void device_wakeup_disarm_wake_irqs(void);
#else
-static inline int
-device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq)
-{
- return 0;
-}
+static inline void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq) {}
static inline void device_wakeup_detach_irq(struct device *dev)
{
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6e89b51ea3d9..8bef3cb2424d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1613,22 +1613,34 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
* Disable runtime PM so we safely can check the device's runtime PM status and
- * if it is active, invoke it's .runtime_suspend callback to bring it into
- * suspend state. Keep runtime PM disabled to preserve the state unless we
- * encounter errors.
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent). Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
*
* Typically this function may be invoked from a system suspend callback to make
- * sure the device is put into low power state.
+ * sure the device is put into low power state and it should only be used during
+ * system-wide PM transitions to sleep states. It assumes that the analogous
+ * pm_runtime_force_resume() will be used to resume the device.
*/
int pm_runtime_force_suspend(struct device *dev)
{
int (*callback)(struct device *);
- int ret = 0;
+ int ret;
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev))
@@ -1636,27 +1648,23 @@ int pm_runtime_force_suspend(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- if (!callback) {
- ret = -ENOSYS;
- goto err;
- }
-
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret)
goto err;
/*
- * Increase the runtime PM usage count for the device's parent, in case
- * when we find the device being used when system suspend was invoked.
- * This informs pm_runtime_force_resume() to resume the parent
- * immediately, which is needed to be able to resume its children,
- * when not deferring the resume to be managed via runtime PM.
+ * If the device can stay in suspend after the system-wide transition
+ * to the working state that will follow, drop the children counter of
+ * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ * function will be called again for it in the meantime.
*/
- if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
- pm_runtime_get_noresume(dev->parent);
+ if (pm_runtime_need_not_resume(dev))
+ pm_runtime_set_suspended(dev);
+ else
+ __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_set_suspended(dev);
return 0;
+
err:
pm_runtime_enable(dev);
return ret;
@@ -1669,13 +1677,9 @@ EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
*
* Prior to invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power, if it is expected to be
- * used on system resume. To distinguish that, we check whether the runtime PM
- * usage count is greater than 1 (the PM core increases the usage count in the
- * system PM prepare phase), as that indicates a real user (such as a subsystem,
- * driver, userspace, etc.) is using it. If that is the case, the device is
- * expected to be used on system resume as well, so then we resume it. In the
- * other case, we defer the resume to be managed via runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. In the other case, we defer the resume to be managed
+ * via runtime PM.
*
* Typically this function may be invoked from a system resume callback.
*/
@@ -1684,32 +1688,18 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
-
- if (!callback) {
- ret = -ENOSYS;
- goto out;
- }
-
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
goto out;
/*
- * Decrease the parent's runtime PM usage count, if we increased it
- * during system suspend in pm_runtime_force_suspend().
- */
- if (atomic_read(&dev->power.usage_count) > 1) {
- if (dev->parent)
- pm_runtime_put_noidle(dev->parent);
- } else {
- goto out;
- }
+ * The value of the parent's children counter is correct already, so
+ * just update the status of the device.
+ */
+ __update_runtime_status(dev, RPM_ACTIVE);
- ret = pm_runtime_set_active(dev);
- if (ret)
- goto out;
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
goto out;
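These helpers exist so that runtime PM callbacks can double as system sleep callbacks; the reworked bookkeeping (usage and child counters checked via pm_runtime_need_not_resume(), status updated with __update_runtime_status()) keeps that pairing balanced. Typical usage (a sketch; my_runtime_suspend/resume are assumptions):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static const struct dev_pm_ops my_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
    };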
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e153e28b1857..0f651efc58a1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
const char * buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
device_lock(dev);
- if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+ if (sysfs_streq(buf, ctrl_auto))
pm_runtime_allow(dev);
- else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+ else if (sysfs_streq(buf, ctrl_on))
pm_runtime_forbid(dev);
else
n = -EINVAL;
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
return n;
}
-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);
-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);
-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);
-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
return sprintf(buf, p);
}
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
- autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
-static ssize_t pm_qos_resume_latency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sprintf(buf, "n/a\n");
- else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_resume_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
if (value == 0)
value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ } else if (sysfs_streq(buf, "n/a")) {
value = 0;
} else {
return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
return sprintf(buf, "auto\n");
- else if (value == PM_QOS_LATENCY_ANY)
+ if (value == PM_QOS_LATENCY_ANY)
return sprintf(buf, "any\n");
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
if (value < 0)
return -EINVAL;
} else {
- if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ if (sysfs_streq(buf, "auto"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
- else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ else if (sysfs_streq(buf, "any"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
- pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
- pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
- const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
if (!device_can_wakeup(dev))
return -EINVAL;
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1
- && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+ if (sysfs_streq(buf, _enabled))
device_set_wakeup_enable(dev, 1);
- else if (len == sizeof _disabled - 1
- && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);
static ssize_t wakeup_active_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);
static ssize_t wakeup_abort_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);
static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned int active = 0;
bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);
-static ssize_t wakeup_total_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
-static ssize_t wakeup_max_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
-static ssize_t wakeup_last_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
- wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_usage_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
+static DEVICE_ATTR_RO(runtime_usage);
-static ssize_t rtpm_children_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
+static DEVICE_ATTR_RO(runtime_active_kids);
-static ssize_t rtpm_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ if (dev->power.disable_depth && (dev->power.runtime_auto == false))
return sprintf(buf, "disabled & forbidden\n");
- else if (dev->power.disable_depth)
+ if (dev->power.disable_depth)
return sprintf(buf, "disabled\n");
- else if (dev->power.runtime_auto == false)
+ if (dev->power.runtime_auto == false)
return sprintf(buf, "forbidden\n");
return sprintf(buf, "enabled\n");
}
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);
#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+ if (sysfs_streq(buf, _enabled))
device_enable_async_suspend(dev);
- else if (len == sizeof _disabled - 1 &&
- strncmp(buf, _disabled, len) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_disable_async_suspend(dev);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */
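The conversions above follow two conventions: handlers are named <attr>_show()/<attr>_store() so the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() helpers can derive the attribute from the function names, and string input is parsed with sysfs_streq(), which tolerates the trailing newline that echo(1) appends. A minimal attribute in that style (illustrative; the "example" attribute is an assumption):

    static bool example_on;

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        return sprintf(buf, "%s\n", example_on ? "enabled" : "disabled");
    }

    static ssize_t example_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t n)
    {
        if (sysfs_streq(buf, "enabled"))
            example_on = true;
        else if (sysfs_streq(buf, "disabled"))
            example_on = false;
        else
            return -EINVAL;
        return n;
    }
    static DEVICE_ATTR_RW(example);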
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index ae0429827f31..a8ac86e4d79e 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -33,7 +33,6 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
struct wake_irq *wirq)
{
unsigned long flags;
- int err;
if (!dev || !wirq)
return -EINVAL;
@@ -45,12 +44,11 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
return -EEXIST;
}
- err = device_wakeup_attach_irq(dev, wirq);
- if (!err)
- dev->power.wakeirq = wirq;
+ dev->power.wakeirq = wirq;
+ device_wakeup_attach_irq(dev, wirq);
spin_unlock_irqrestore(&dev->power.lock, flags);
- return err;
+ return 0;
}
/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 38559f04db2c..ea01621ed769 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -19,6 +19,11 @@
#include "power.h"
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+#endif
+
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -268,6 +273,9 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
+ if (pm_suspend_target_state != PM_SUSPEND_ON)
+ dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
ws = wakeup_source_register(dev_name(dev));
if (!ws)
return -ENOMEM;
@@ -291,22 +299,19 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
*
* Call under the device's power.lock lock.
*/
-int device_wakeup_attach_irq(struct device *dev,
+void device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
ws = dev->power.wakeup;
- if (!ws) {
- dev_err(dev, "forgot to call call device_init_wakeup?\n");
- return -EINVAL;
- }
+ if (!ws)
+ return;
if (ws->wakeirq)
- return -EEXIST;
+ dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
ws->wakeirq = wakeirq;
- return 0;
}
/**
@@ -448,9 +453,7 @@ int device_init_wakeup(struct device *dev, bool enable)
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
} else {
- if (dev->power.can_wakeup)
- device_wakeup_disable(dev);
-
+ device_wakeup_disable(dev);
device_set_wakeup_capable(dev, false);
}
@@ -464,9 +467,6 @@ EXPORT_SYMBOL_GPL(device_init_wakeup);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
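
With the redundant can_wakeup checks dropped above, the driver-facing contract is unchanged: one call per direction, with device_wakeup_enable() still validating its arguments. A sketch of typical use (the foo_* probe/remove context is illustrative):

static int foo_probe(struct device *dev)
{
	/* mark the device wakeup-capable and enable wakeup */
	return device_init_wakeup(dev, true);
}

static int foo_remove(struct device *dev)
{
	/* disable wakeup and clear the capability flag */
	return device_init_wakeup(dev, false);
}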
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 613ba820f545..96aa71c93daf 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1418,3 +1418,10 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
+
+void *device_get_match_data(struct device *dev)
+{
+ return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
+ dev);
+}
+EXPORT_SYMBOL_GPL(device_get_match_data);
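
The new device_get_match_data() dispatches through the fwnode op table, so one probe path can fetch per-variant driver data whether the device was matched via OF or ACPI, provided the firmware backend implements the op. A hypothetical consumer (struct foo_data and the match table are illustrative):

struct foo_data {
	unsigned int max_channels;
};

static const struct foo_data foo_v1 = { .max_channels = 4 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ /* sentinel */ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_data *data = device_get_match_data(&pdev->dev);

	if (!data)
		return -ENODEV;
	/* ... configure up to data->max_channels channels ... */
	return 0;
}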
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 3a1535d812d8..067073e4beb1 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,7 +6,6 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
- select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -39,5 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
-config REGMAP_HWSPINLOCK
- bool
+config REGMAP_SOUNDWIRE
+ tristate
+ depends on SOUNDWIRE_BUS
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0d298c446108..22d263cca395 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 8641183cac2f..53785e0e297a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -77,6 +77,7 @@ struct regmap {
int async_ret;
#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
struct dentry *debugfs;
const char *debugfs_name;
@@ -215,10 +216,17 @@ struct regmap_field {
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
#else
static inline void regmap_debugfs_initcall(void) { }
static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
#endif
/* regcache core declarations */
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 4d2e50bfc726..bc6cd88b8cc6 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -37,9 +37,12 @@ static int regcache_flat_init(struct regmap *map)
cache = map->cache;
- for (i = 0; i < map->num_reg_defaults; i++)
- cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
- map->reg_defaults[i].def;
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ unsigned int reg = map->reg_defaults[i].reg;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ cache[index] = map->reg_defaults[i].def;
+ }
return 0;
}
@@ -56,8 +59,9 @@ static int regcache_flat_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[regcache_flat_get_index(map, reg)];
+ *value = cache[index];
return 0;
}
@@ -66,8 +70,9 @@ static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- cache[regcache_flat_get_index(map, reg)] = value;
+ cache[index] = value;
return 0;
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 36ce3511c733..f3266334063e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -529,6 +529,18 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct regmap_range_node *range_node;
const char *devname = "dummy";
+ /*
+ * Userspace can initiate reads from the hardware over debugfs.
+ * Normally internal regmap structures and buffers are protected with
+ * a mutex or a spinlock, but if the regmap owner decided to disable
+ * all locking mechanisms, this is no longer the case. For safety:
+ * don't create the debugfs entries if locking is disabled.
+ */
+ if (map->debugfs_disable) {
+ dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
+ return;
+ }
+
/* If we don't have the debugfs root yet, postpone init */
if (!regmap_debugfs_root) {
struct regmap_debugfs_node *node;
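
The guard above pairs with the disable_locking flag handled further down in regmap.c: a regmap created without internal locking leaves all serialization to its owner, so exposing debugfs read hooks to userspace would race unprotected internal buffers. A sketch of such a config (field values illustrative):

static const struct regmap_config foo_nolock_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
	.disable_locking = true,	/* caller owns all serialization */
};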
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
new file mode 100644
index 000000000000..50a66382d87d
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include "internal.h"
+
+static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_write(slave, reg, val);
+}
+
+static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+ read = sdw_read(slave, reg);
+ if (read < 0)
+ return read;
+
+ *val = read;
+ return 0;
+}
+
+static struct regmap_bus regmap_sdw = {
+ .reg_read = regmap_sdw_read,
+ .reg_write = regmap_sdw_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+ /* All register values are 8 bits wide, per the MIPI SoundWire 1.0 spec */
+ if (config->val_bits != 8)
+ return -ENOTSUPP;
+
+ /* Register addresses are 32 bits wide */
+ if (config->reg_bits != 32)
+ return -ENOTSUPP;
+
+ if (config->pad_bits != 0)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw);
+
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
+
+MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8d516a9bfc01..ee302ccdfbc8 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -414,7 +414,6 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
-#ifdef REGMAP_HWSPINLOCK
static void regmap_lock_hwlock(void *__map)
{
struct regmap *map = __map;
@@ -457,7 +456,11 @@ static void regmap_unlock_hwlock_irqrestore(void *__map)
hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
-#endif
+
+static void regmap_lock_unlock_none(void *__map)
+{
+
+}
static void regmap_lock_mutex(void *__map)
{
@@ -669,16 +672,26 @@ struct regmap *__regmap_init(struct device *dev,
goto err;
}
- if (config->lock && config->unlock) {
+ if (config->name) {
+ map->name = kstrdup_const(config->name, GFP_KERNEL);
+ if (!map->name) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+ }
+
+ if (config->disable_locking) {
+ map->lock = map->unlock = regmap_lock_unlock_none;
+ regmap_debugfs_disable(map);
+ } else if (config->lock && config->unlock) {
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
- } else if (config->hwlock_id) {
-#ifdef REGMAP_HWSPINLOCK
+ } else if (config->use_hwlock) {
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
if (!map->hwlock) {
ret = -ENXIO;
- goto err_map;
+ goto err_name;
}
switch (config->hwlock_mode) {
@@ -697,10 +710,6 @@ struct regmap *__regmap_init(struct device *dev,
}
map->lock_arg = map;
-#else
- ret = -EINVAL;
- goto err_map;
-#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -762,14 +771,15 @@ struct regmap *__regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->name = config->name;
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
- if (config->read_flag_mask || config->write_flag_mask) {
+ if (config->read_flag_mask ||
+ config->write_flag_mask ||
+ config->zero_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
} else if (bus) {
@@ -1116,8 +1126,10 @@ err_range:
regmap_range_exit(map);
kfree(map->work_buf);
err_hwlock:
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+err_name:
+ kfree_const(map->name);
err_map:
kfree(map);
err:
@@ -1305,8 +1317,9 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+ kfree_const(map->name);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -2423,13 +2436,15 @@ static int _regmap_bus_read(void *context, unsigned int reg,
{
int ret;
struct regmap *map = context;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
if (!map->format.parse_val)
return -EINVAL;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
if (ret == 0)
- *val = map->format.parse_val(map->work_buf);
+ *val = map->format.parse_val(work_val);
return ret;
}
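
With the REGMAP_HWSPINLOCK Kconfig symbol gone, hardware-spinlock protection is requested purely through the config via the new explicit use_hwlock flag (a nonzero hwlock_id alone no longer triggers it). A sketch (ID and mode illustrative):

static const struct regmap_config foo_hwlock_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.use_hwlock = true,		/* opt in to hwspinlock protection */
	.hwlock_id = 1,			/* hwspinlock instance to request */
	.hwlock_mode = HWLOCK_IRQSTATE,	/* take with IRQ save/restore */
};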
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 442e777bdfb2..728075214959 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6619,43 +6619,27 @@ static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
#ifdef DAC960_GAM_MINOR
-/*
- * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
-*/
-
-static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
- unsigned long Argument)
+static long DAC960_gam_get_controller_info(DAC960_ControllerInfo_T __user *UserSpaceControllerInfo)
{
- long ErrorCode = 0;
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-
- mutex_lock(&DAC960_mutex);
- switch (Request)
- {
- case DAC960_IOCTL_GET_CONTROLLER_COUNT:
- ErrorCode = DAC960_ControllerCount;
- break;
- case DAC960_IOCTL_GET_CONTROLLER_INFO:
- {
- DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
- (DAC960_ControllerInfo_T __user *) Argument;
DAC960_ControllerInfo_T ControllerInfo;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceControllerInfo == NULL)
ErrorCode = -EINVAL;
else ErrorCode = get_user(ControllerNumber,
&UserSpaceControllerInfo->ControllerNumber);
if (ErrorCode != 0)
- break;
+ goto out;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1) {
- break;
+ goto out;
}
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
ControllerInfo.ControllerNumber = ControllerNumber;
ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -6670,12 +6654,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
- break;
- }
- case DAC960_IOCTL_V1_EXECUTE_COMMAND:
- {
- DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V1_UserCommand_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v1_execute_command(DAC960_V1_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V1_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6688,39 +6672,41 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
int ControllerNumber, DataTransferLength;
unsigned char *DataTransferBuffer = NULL;
dma_addr_t DataTransferBufferDMA;
+ long ErrorCode;
+
if (UserSpaceUserCommand == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V1_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ControllerNumber = UserCommand.ControllerNumber;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
ErrorCode = -EINVAL;
if (Controller->FirmwareType != DAC960_V1_Controller)
- break;
+ goto out;
CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
DataTransferLength = UserCommand.DataTransferLength;
if (CommandOpcode & 0x80)
- break;
+ goto out;
if (CommandOpcode == DAC960_V1_DCDB)
{
if (copy_from_user(&DCDB, UserCommand.DCDB,
sizeof(DAC960_V1_DCDB_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
if (DCDB.Channel >= DAC960_V1_MaxChannels)
- break;
+ goto out;
if (!((DataTransferLength == 0 &&
DCDB.Direction
== DAC960_V1_DCDB_NoDataTransfer) ||
@@ -6730,15 +6716,15 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
(DataTransferLength < 0 &&
DCDB.Direction
== DAC960_V1_DCDB_DataTransferSystemToDevice)))
- break;
+ goto out;
if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
!= abs(DataTransferLength))
- break;
+ goto out;
DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
if (DCDB_IOBUF == NULL) {
ErrorCode = -ENOMEM;
- break;
+ goto out;
}
}
ErrorCode = -ENOMEM;
@@ -6748,19 +6734,19 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
}
if (CommandOpcode == DAC960_V1_DCDB)
@@ -6837,12 +6823,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (DCDB_IOBUF != NULL)
pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
DCDB_IOBUF, DCDB_IOBUFDMA);
- break;
- }
- case DAC960_IOCTL_V2_EXECUTE_COMMAND:
- {
- DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V2_UserCommand_T __user *) Argument;
+ out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_execute_command(DAC960_V2_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V2_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6855,26 +6841,26 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
dma_addr_t DataTransferBufferDMA;
unsigned char *RequestSenseBuffer = NULL;
dma_addr_t RequestSenseBufferDMA;
+ long ErrorCode = -EINVAL;
- ErrorCode = -EINVAL;
if (UserSpaceUserCommand == NULL)
- break;
+ goto out;
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V2_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = UserCommand.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller){
ErrorCode = -EINVAL;
- break;
+ goto out;
}
DataTransferLength = UserCommand.DataTransferLength;
ErrorCode = -ENOMEM;
@@ -6884,14 +6870,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
@@ -7001,42 +6987,44 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (RequestSenseBuffer != NULL)
pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
RequestSenseBuffer, RequestSenseBufferDMA);
- break;
- }
- case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
- {
- DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
- (DAC960_V2_GetHealthStatus_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_get_health_status(DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus)
+{
DAC960_V2_GetHealthStatus_T GetHealthStatus;
DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceGetHealthStatus == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
sizeof(DAC960_V2_GetHealthStatus_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = GetHealthStatus.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&HealthStatusBuffer,
GetHealthStatus.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
!(Controller->V2.HealthStatusBuffer->StatusChangeCounter
@@ -7046,7 +7034,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DAC960_MonitoringTimerInterval);
if (ErrorCode == -ERESTARTSYS) {
ErrorCode = -EINTR;
- break;
+ goto out;
}
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
@@ -7054,7 +7042,39 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
else
ErrorCode = 0;
- }
+
+out:
+ return ErrorCode;
+}
+
+/*
+ * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
+*/
+
+static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
+ unsigned long Argument)
+{
+ long ErrorCode = 0;
+ void __user *argp = (void __user *)Argument;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+
+ mutex_lock(&DAC960_mutex);
+ switch (Request)
+ {
+ case DAC960_IOCTL_GET_CONTROLLER_COUNT:
+ ErrorCode = DAC960_ControllerCount;
+ break;
+ case DAC960_IOCTL_GET_CONTROLLER_INFO:
+ ErrorCode = DAC960_gam_get_controller_info(argp);
+ break;
+ case DAC960_IOCTL_V1_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v1_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v2_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
+ ErrorCode = DAC960_gam_v2_get_health_status(argp);
break;
default:
ErrorCode = -ENOTTY;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 40579d0cb3d1..ad9b687a236a 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -20,6 +20,10 @@ config BLK_DEV_NULL_BLK
tristate "Null test block driver"
select CONFIGFS_FS
+config BLK_DEV_NULL_BLK_FAULT_INJECTION
+ bool "Support fault injection for Null test block driver"
+ depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
+
config BLK_DEV_FD
tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 9220f8e833d0..c0ebda1283cc 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -112,8 +112,7 @@ enum frame_flags {
struct frame {
struct list_head head;
u32 tag;
- struct timeval sent; /* high-res time packet was sent */
- u32 sent_jiffs; /* low-res jiffies-based sent time */
+ ktime_t sent; /* high-res time packet was sent */
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 812fed069708..540bb60cd071 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -398,8 +398,7 @@ aoecmd_ata_rw(struct aoedev *d)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -489,8 +488,7 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -499,33 +497,17 @@ resend(struct aoedev *d, struct frame *f)
static int
tsince_hr(struct frame *f)
{
- struct timeval now;
- int n;
+ u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));
- do_gettimeofday(&now);
- n = now.tv_usec - f->sent.tv_usec;
- n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+ /* delta is normally under 4.2 seconds, avoid 64-bit division */
+ if (likely(delta <= UINT_MAX))
+ return (u32)delta / NSEC_PER_USEC;
- if (n < 0)
- n = -n;
+ /* clamp: the int return would overflow after ~36 minutes */
+ if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
+ return INT_MAX;
- /* For relatively long periods, use jiffies to avoid
- * discrepancies caused by updates to the system time.
- *
- * On system with HZ of 1000, 32-bits is over 49 days
- * worth of jiffies, or over 71 minutes worth of usecs.
- *
- * Jiffies overflow is handled by subtraction of unsigned ints:
- * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
- * $3 = 4
- * (gdb)
- */
- if (n > USEC_PER_SEC / 4) {
- n = ((u32) jiffies) - f->sent_jiffs;
- n *= USEC_PER_SEC / HZ;
- }
-
- return n;
+ return div_u64(delta, NSEC_PER_USEC);
}
static int
@@ -589,7 +571,6 @@ reassign_frame(struct frame *f)
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
- nf->sent_jiffs = f->sent_jiffs;
f->skb = skb;
return nf;
@@ -633,8 +614,7 @@ probe(struct aoetgt *t)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -1432,10 +1412,8 @@ aoecmd_ata_id(struct aoedev *d)
d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
- if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
- }
+ if (skb)
+ f->sent = ktime_get();
return skb;
}
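
The tsince_hr() rewrite above boils down to one reusable idea: derive elapsed microseconds from a monotonic ktime_t, skipping the 64-bit division whenever the interval fits in 32 bits of nanoseconds. Restated as a standalone helper (a sketch, not part of the patch):

static int elapsed_usecs(ktime_t since)
{
	u64 delta = ktime_to_ns(ktime_sub(ktime_get(), since));

	if (likely(delta <= UINT_MAX))		/* under ~4.3 s */
		return (u32)delta / NSEC_PER_USEC;
	if (delta > (u64)INT_MAX * NSEC_PER_USEC)
		return INT_MAX;			/* clamp to the int range */
	return div_u64(delta, NSEC_PER_USEC);
}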
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index bd97908c766f..9f4e6f502b84 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -953,7 +953,7 @@ static void drbd_bm_endio(struct bio *bio)
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
- unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
+ unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 4b4697a1f963..0a0394aa1b9c 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1847,7 +1847,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov = {.iov_base = buf, .iov_len = size};
- struct msghdr msg;
+ struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
int rv, sent = 0;
if (!sock)
@@ -1855,12 +1855,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
/* THINK if (signal_pending) return ... ? */
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
if (sock == connection->data.socket) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cb2fa63f6bc0..c72dee0ef083 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -516,7 +516,8 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
struct msghdr msg = {
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ return sock_recvmsg(sock, &msg, msg.msg_flags);
}
static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
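
drbd_recv_short() now primes the msghdr's iov_iter itself and calls sock_recvmsg() directly, rather than letting kernel_recvmsg() rebuild the iterator on every call. The general shape of the conversion as a standalone sketch (READ | ITER_KVEC matches this tree's iov_iter API):

static int recv_exact(struct socket *sock, void *buf, size_t size)
{
	struct kvec iov = { .iov_base = buf, .iov_len = size };
	struct msghdr msg = { .msg_flags = MSG_WAITALL | MSG_NOSIGNAL };

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
	return sock_recvmsg(sock, &msg, msg.msg_flags);
}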
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ad0477ae820f..6655893a3a7a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -12,9 +12,9 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
-#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
#define SECTOR_SHIFT 9
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
@@ -27,6 +27,10 @@
#define TICKS_PER_SEC 50ULL
#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(null_timeout_attr);
+#endif
+
static inline u64 mb_per_tick(int mbps)
{
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
@@ -107,7 +111,6 @@ struct nullb_device {
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool use_lightnvm; /* register as a LightNVM device */
bool blocking; /* blocking blk-mq device */
bool use_per_node_hctx; /* use per-node allocation for hardware context */
bool power; /* power on/off the device */
@@ -121,7 +124,6 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
- struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
unsigned int queue_depth;
@@ -139,7 +141,6 @@ static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
-static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
enum {
@@ -166,6 +167,11 @@ static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static char g_timeout_str[80];
+module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+#endif
+
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -208,10 +214,6 @@ static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool g_use_lightnvm;
-module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
-MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-
static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
@@ -345,7 +347,6 @@ NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
@@ -455,7 +456,6 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_use_lightnvm,
&nullb_device_attr_blocking,
&nullb_device_attr_use_per_node_hctx,
&nullb_device_attr_power,
@@ -573,7 +573,6 @@ static struct nullb_device *null_alloc_dev(void)
dev->blocksize = g_bs;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
- dev->use_lightnvm = g_use_lightnvm;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
return dev;
@@ -1352,6 +1351,12 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
struct nullb *nullb = q->queuedata;
@@ -1369,6 +1374,16 @@ static int null_rq_prep_fn(struct request_queue *q, struct request *req)
return BLKPREP_DEFER;
}
+static bool should_timeout_request(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (g_timeout_str[0])
+ return should_fail(&null_timeout_attr, 1);
+#endif
+
+ return false;
+}
+
static void null_request_fn(struct request_queue *q)
{
struct request *rq;
@@ -1376,12 +1391,20 @@ static void null_request_fn(struct request_queue *q)
while ((rq = blk_fetch_request(q)) != NULL) {
struct nullb_cmd *cmd = rq->special;
- spin_unlock_irq(q->queue_lock);
- null_handle_cmd(cmd);
- spin_lock_irq(q->queue_lock);
+ if (!should_timeout_request(rq)) {
+ spin_unlock_irq(q->queue_lock);
+ null_handle_cmd(cmd);
+ spin_lock_irq(q->queue_lock);
+ }
}
}
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1399,12 +1422,16 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
- return null_handle_cmd(cmd);
+ if (!should_timeout_request(bd->rq))
+ return null_handle_cmd(cmd);
+
+ return BLK_STS_OK;
}
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_softirq_done_fn,
+ .timeout = null_timeout_rq,
};
static void cleanup_queue(struct nullb_queue *nq)
@@ -1423,170 +1450,6 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
-#ifdef CONFIG_NVM
-
-static void null_lnvm_end_io(struct request *rq, blk_status_t status)
-{
- struct nvm_rq *rqd = rq->end_io_data;
-
- /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */
- rqd->error = status ? -EIO : 0;
- nvm_end_io(rqd);
-
- blk_put_request(rq);
-}
-
-static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- struct request_queue *q = dev->q;
- struct request *rq;
- struct bio *bio = rqd->bio;
-
- rq = blk_mq_alloc_request(q,
- op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
-
- blk_init_request_from_bio(rq, bio);
-
- rq->end_io_data = rqd;
-
- blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
-
- return 0;
-}
-
-static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
-{
- struct nullb *nullb = dev->q->queuedata;
- sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
- sector_t blksize;
- struct nvm_id_group *grp;
-
- id->ver_id = 0x1;
- id->vmnt = 0;
- id->cap = 0x2;
- id->dom = 0x1;
-
- id->ppaf.blk_offset = 0;
- id->ppaf.blk_len = 16;
- id->ppaf.pg_offset = 16;
- id->ppaf.pg_len = 16;
- id->ppaf.sect_offset = 32;
- id->ppaf.sect_len = 8;
- id->ppaf.pln_offset = 40;
- id->ppaf.pln_len = 8;
- id->ppaf.lun_offset = 48;
- id->ppaf.lun_len = 8;
- id->ppaf.ch_offset = 56;
- id->ppaf.ch_len = 8;
-
- sector_div(size, nullb->dev->blocksize); /* convert size to pages */
- size >>= 8; /* concert size to pgs pr blk */
- grp = &id->grp;
- grp->mtype = 0;
- grp->fmtype = 0;
- grp->num_ch = 1;
- grp->num_pg = 256;
- blksize = size;
- size >>= 16;
- grp->num_lun = size + 1;
- sector_div(blksize, grp->num_lun);
- grp->num_blk = blksize;
- grp->num_pln = 1;
-
- grp->fpg_sz = nullb->dev->blocksize;
- grp->csecs = nullb->dev->blocksize;
- grp->trdt = 25000;
- grp->trdm = 25000;
- grp->tprt = 500000;
- grp->tprm = 500000;
- grp->tbet = 1500000;
- grp->tbem = 1500000;
- grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = nullb->dev->hw_queue_depth;
-
- return 0;
-}
-
-static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
-{
- mempool_t *virtmem_pool;
-
- virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
- if (!virtmem_pool) {
- pr_err("null_blk: Unable to create virtual memory pool\n");
- return NULL;
- }
-
- return virtmem_pool;
-}
-
-static void null_lnvm_destroy_dma_pool(void *pool)
-{
- mempool_destroy(pool);
-}
-
-static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
- gfp_t mem_flags, dma_addr_t *dma_handler)
-{
- return mempool_alloc(pool, mem_flags);
-}
-
-static void null_lnvm_dev_dma_free(void *pool, void *entry,
- dma_addr_t dma_handler)
-{
- mempool_free(entry, pool);
-}
-
-static struct nvm_dev_ops null_lnvm_dev_ops = {
- .identity = null_lnvm_id,
- .submit_io = null_lnvm_submit_io,
-
- .create_dma_pool = null_lnvm_create_dma_pool,
- .destroy_dma_pool = null_lnvm_destroy_dma_pool,
- .dev_dma_alloc = null_lnvm_dev_dma_alloc,
- .dev_dma_free = null_lnvm_dev_dma_free,
-
- /* Simulate nvme protocol restriction */
- .max_phys_sect = 64,
-};
-
-static int null_nvm_register(struct nullb *nullb)
-{
- struct nvm_dev *dev;
- int rv;
-
- dev = nvm_alloc_dev(0);
- if (!dev)
- return -ENOMEM;
-
- dev->q = nullb->q;
- memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
- dev->ops = &null_lnvm_dev_ops;
-
- rv = nvm_register(dev);
- if (rv) {
- kfree(dev);
- return rv;
- }
- nullb->ndev = dev;
- return 0;
-}
-
-static void null_nvm_unregister(struct nullb *nullb)
-{
- nvm_unregister(nullb->ndev);
-}
-#else
-static int null_nvm_register(struct nullb *nullb)
-{
- pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
- return -EINVAL;
-}
-static void null_nvm_unregister(struct nullb *nullb) {}
-#endif /* CONFIG_NVM */
-
static void null_del_dev(struct nullb *nullb)
{
struct nullb_device *dev = nullb->dev;
@@ -1595,10 +1458,7 @@ static void null_del_dev(struct nullb *nullb)
list_del_init(&nullb->list);
- if (dev->use_lightnvm)
- null_nvm_unregister(nullb);
- else
- del_gendisk(nullb->disk);
+ del_gendisk(nullb->disk);
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
@@ -1610,8 +1470,7 @@ static void null_del_dev(struct nullb *nullb)
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!dev->use_lightnvm)
- put_disk(nullb->disk);
+ put_disk(nullb->disk);
cleanup_queues(nullb);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
@@ -1775,11 +1634,6 @@ static void null_validate_conf(struct nullb_device *dev)
{
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
- if (dev->use_lightnvm && dev->blocksize != 4096)
- dev->blocksize = 4096;
-
- if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
- dev->queue_mode = NULL_Q_MQ;
if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
@@ -1805,6 +1659,20 @@ static void null_validate_conf(struct nullb_device *dev)
dev->mbps = 0;
}
+static bool null_setup_fault(void)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (!g_timeout_str[0])
+ return true;
+
+ if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+ return false;
+
+ null_timeout_attr.verbose = 0;
+#endif
+ return true;
+}
+
static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
@@ -1838,6 +1706,10 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_queues;
+ if (!null_setup_fault())
+ goto out_cleanup_queues;
+
+ nullb->tag_set->timeout = 5 * HZ;
nullb->q = blk_mq_init_queue(nullb->tag_set);
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
@@ -1861,8 +1733,14 @@ static int null_add_dev(struct nullb_device *dev)
rv = -ENOMEM;
goto out_cleanup_queues;
}
+
+ if (!null_setup_fault())
+ goto out_cleanup_blk_queue;
+
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
+ nullb->q->rq_timeout = 5 * HZ;
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1895,11 +1773,7 @@ static int null_add_dev(struct nullb_device *dev)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (dev->use_lightnvm)
- rv = null_nvm_register(nullb);
- else
- rv = null_gendisk_register(nullb);
-
+ rv = null_gendisk_register(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1938,18 +1812,6 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
- if (g_use_lightnvm && g_bs != 4096) {
- pr_warn("null_blk: LightNVM only supports 4k block size\n");
- pr_warn("null_blk: defaults block size to 4k\n");
- g_bs = 4096;
- }
-
- if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
- pr_warn("null_blk: LightNVM only supported for blk-mq\n");
- pr_warn("null_blk: defaults queue mode to blk-mq\n");
- g_queue_mode = NULL_Q_MQ;
- }
-
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
@@ -1982,16 +1844,6 @@ static int __init null_init(void)
goto err_conf;
}
- if (g_use_lightnvm) {
- ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
- 0, 0, NULL);
- if (!ppa_cache) {
- pr_err("null_blk: unable to create ppa cache\n");
- ret = -ENOMEM;
- goto err_ppa;
- }
- }
-
for (i = 0; i < nr_devices; i++) {
dev = null_alloc_dev();
if (!dev) {
@@ -2015,8 +1867,6 @@ err_dev:
null_del_dev(nullb);
null_free_dev(dev);
}
- kmem_cache_destroy(ppa_cache);
-err_ppa:
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
@@ -2047,8 +1897,6 @@ static void __exit null_exit(void)
if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
-
- kmem_cache_destroy(ppa_cache);
}
module_init(null_init);
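
The new timeout knob takes the standard fault-attr string "<interval>,<probability>,<space>,<times>" parsed by setup_fault_attr(); requests selected by should_fail() are started but never completed, so they trip the 5-second timeout handlers wired up above. A standalone illustration of how such a string is consumed (values are examples only: check every request, fail 100% of the time, unlimited times):

static DECLARE_FAULT_ATTR(example_timeout_attr);
static char example_str[] = "1,100,0,-1";

static int __init example_init(void)
{
	if (!setup_fault_attr(&example_timeout_attr, example_str))
		return -EINVAL;	/* malformed attr string */
	return 0;
}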
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 67974796c350..531a0915066b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (ret)
+ return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
- bdput(bdev);
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
- if (ret)
- return ret;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
- goto out_new_dev;
+ goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
@@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
mutex_unlock(&ctl_mutex);
return 0;
-out_new_dev:
- blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
deleted file mode 100644
index e5565fbaeb30..000000000000
--- a/drivers/block/smart1,2.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * If you want to make changes, improve or add functionality to this
- * driver, you'll probably need the Compaq Array Controller Interface
- * Specificiation (Document number ECG086/1198)
- */
-
-/*
- * This file contains the controller communication implementation for
- * Compaq SMART-1 and SMART-2 controllers. To the best of my knowledge,
- * this should support:
- *
- * PCI:
- * SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
- * Integerated SMART Array Controller, SMART-4200, SMART-4250ES
- *
- * EISA:
- * SMART-2/E, SMART, IAES, IDA-2, IDA
- */
-
-/*
- * Memory mapped FIFO interface (SMART 42xx cards)
- */
-static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
-}
-
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x08 turns them off...
- */
-static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- writel( S42XX_INTR_OFF,
- h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/*
- * For older cards FIFO Full = 0.
- * On this card 0 means there is room, anything else FIFO Full.
- *
- */
-static unsigned long smart4_fifo_full(ctlr_info_t *h)
-{
-
- return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET));
-}
-
-/* This type of controller returns -1 if the fifo is empty,
- * Not 0 like the others.
- * And we need to let it know we read a value out
- */
-static unsigned long smart4_completed(ctlr_info_t *h)
-{
- long register_value
- = readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- /* Fifo is empty */
- if( register_value == 0xffffffff)
- return 0;
-
- /* Need to let it know we got the reply */
- /* We do this by writing a 0 to the port we just read from */
- writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- return ((unsigned long) register_value);
-}
-
- /*
- * This hardware returns interrupt pending at a different place and
- * it does not tell us if the fifo is empty, we will have check
- * that by getting a 0 back from the command_completed call.
- */
-static unsigned long smart4_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value =
- readl(h->vaddr + S42XX_INTR_STATUS);
-
- if( register_value & S42XX_INTR_PENDING)
- return FIFO_NOT_EMPTY;
- return 0 ;
-}
-
-static struct access_method smart4_access = {
- smart4_submit_command,
- smart4_intr_mask,
- smart4_fifo_full,
- smart4_intr_pending,
- smart4_completed,
-};
-
-/*
- * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
- */
-static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + COMMAND_FIFO);
-}
-
-static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- writel(val, h->vaddr + INTR_MASK);
-}
-
-static unsigned long smart2_fifo_full(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_FIFO);
-}
-
-static unsigned long smart2_completed(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2_intr_pending(ctlr_info_t *h)
-{
- return readl(h->vaddr + INTR_PENDING);
-}
-
-static struct access_method smart2_access = {
- smart2_submit_command,
- smart2_intr_mask,
- smart2_fifo_full,
- smart2_intr_pending,
- smart2_completed,
-};
-
-/*
- * IO access for SMART-2/E cards
- */
-static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
-}
-
-static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- outl(val, h->io_mem_addr + INTR_MASK);
-}
-
-static unsigned long smart2e_fifo_full(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_FIFO);
-}
-
-static unsigned long smart2e_completed(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2e_intr_pending(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + INTR_PENDING);
-}
-
-static struct access_method smart2e_access = {
- smart2e_submit_command,
- smart2e_intr_mask,
- smart2e_fifo_full,
- smart2e_intr_pending,
- smart2e_completed,
-};
-
-/*
- * IO access for older SMART-1 type cards
- */
-#define SMART1_SYSTEM_MASK 0xC8E
-#define SMART1_SYSTEM_DOORBELL 0xC8F
-#define SMART1_LOCAL_MASK 0xC8C
-#define SMART1_LOCAL_DOORBELL 0xC8D
-#define SMART1_INTR_MASK 0xC89
-#define SMART1_LISTADDR 0xC90
-#define SMART1_LISTLEN 0xC94
-#define SMART1_TAG 0xC97
-#define SMART1_COMPLETE_ADDR 0xC98
-#define SMART1_LISTSTATUS 0xC9E
-
-#define CHANNEL_BUSY 0x01
-#define CHANNEL_CLEAR 0x02
-
-static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- /*
- * This __u16 is actually a bunch of control flags on SMART
- * and below. We want them all to be zero.
- */
- c->hdr.size = 0;
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR);
- outw(c->size, h->io_mem_addr + SMART1_LISTLEN);
-
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-}
-
-static void smart1_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val == 1) {
- outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
- outb(0x01, h->io_mem_addr + SMART1_INTR_MASK);
- outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK);
- } else {
- outb(0, h->io_mem_addr + 0xC8E);
- }
-}
-
-static unsigned long smart1_fifo_full(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR;
- return chan;
-}
-
-static unsigned long smart1_completed(ctlr_info_t *h)
-{
- unsigned char status;
- unsigned long cmd;
-
- if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) {
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR);
- status = inb(h->io_mem_addr + SMART1_LISTSTATUS);
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-
- /*
- * this is x86 (actually compaq x86) only, so it's ok
- */
- if (cmd) ((cmdlist_t*)bus_to_virt(cmd))->req.hdr.rcode = status;
- } else {
- cmd = 0;
- }
- return cmd;
-}
-
-static unsigned long smart1_intr_pending(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY;
- return chan;
-}
-
-static struct access_method smart1_access = {
- smart1_submit_command,
- smart1_intr_mask,
- smart1_fifo_full,
- smart1_intr_pending,
- smart1_completed,
-};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d70eba30003a..0afa6c8c3857 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -430,7 +430,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
static void zram_page_end_io(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
page_endio(page, op_is_write(bio_op(bio)),
blk_status_to_errno(bio->bi_status));
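
Both the drbd_bitmap.c and zram conversions replace direct bi_io_vec[0] dereferences with bio_first_page_all(). Assuming the helper keeps the semantics those call sites relied on, it is conceptually equivalent to (sketch, not the canonical definition):

static inline struct page *first_page_of(struct bio *bio)
{
	return bio->bi_io_vec[0].bv_page;	/* page of the first bvec */
}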
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c823914b3a80..b6a71705b7d6 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -794,7 +794,7 @@ static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
return 0;
}
-static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
+static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
struct file *filp, poll_table *wait)
{
return 0;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index e6f6dbc04131..0521748a1972 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -299,7 +299,7 @@ static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from)
return vhci_get_user(data, from);
}
-static unsigned int vhci_poll(struct file *file, poll_table *wait)
+static __poll_t vhci_poll(struct file *file, poll_table *wait)
{
struct vhci_data *data = file->private_data;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index dc7b3c7b7d42..57e011d36a79 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -120,7 +120,7 @@ config QCOM_EBI2
SRAM, ethernet adapters, FPGAs and LCD displays.
config SIMPLE_PM_BUS
- bool "Simple Power-Managed Bus Driver"
+ tristate "Simple Power-Managed Bus Driver"
depends on OF && PM
help
Driver for transparent busses that don't need a real driver, but
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 1dfb9f8de171..a2a1c1478cd0 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -236,7 +236,7 @@ static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t
return ret;
}
-static unsigned int apm_poll(struct file *fp, poll_table * wait)
+static __poll_t apm_poll(struct file *fp, poll_table * wait)
{
struct apm_user *as = fp->private_data;
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 0d7b577e0ff0..2f92cc46698b 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -406,7 +406,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
* Do I need this function at all???
*/
#if 0
-static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
+static __poll_t dsp56k_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file)) & 0x0f;
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 839ee61d352a..2697c22e3be2 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -91,7 +91,7 @@ static ssize_t dtlk_read(struct file *, char __user *,
size_t nbytes, loff_t * ppos);
static ssize_t dtlk_write(struct file *, const char __user *,
size_t nbytes, loff_t * ppos);
-static unsigned int dtlk_poll(struct file *, poll_table *);
+static __poll_t dtlk_poll(struct file *, poll_table *);
static int dtlk_open(struct inode *, struct file *);
static int dtlk_release(struct inode *, struct file *);
static long dtlk_ioctl(struct file *file,
@@ -228,9 +228,9 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
return -EAGAIN;
}
-static unsigned int dtlk_poll(struct file *file, poll_table * wait)
+static __poll_t dtlk_poll(struct file *file, poll_table * wait)
{
- int mask = 0;
+ __poll_t mask = 0;
unsigned long expires;
TRACE_TEXT(" dtlk_poll");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index b941e6d59fd6..dbed4953f86c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -342,7 +342,7 @@ out:
return retval;
}
-static unsigned int hpet_poll(struct file *file, poll_table * wait)
+static __poll_t hpet_poll(struct file *file, poll_table * wait)
{
unsigned long v;
struct hpet_dev *devp;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f6e3e5abc117..4d0f571c15f9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -73,26 +73,14 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
-config HW_RANDOM_BCM63XX
- tristate "Broadcom BCM63xx Random Number Generator support"
- depends on BCM63XX || BMIPS_GENERIC
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM63xx SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called bcm63xx-rng
-
- If unusure, say Y.
-
config HW_RANDOM_BCM2835
- tristate "Broadcom BCM2835 Random Number Generator support"
- depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
+ tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
+ ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM2835 SoCs.
+ Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs.
To compile this driver as a module, choose M here: the
module will be called bcm2835-rng
@@ -306,19 +294,6 @@ config HW_RANDOM_POWERNV
If unsure, say Y.
-config HW_RANDOM_TPM
- tristate "TPM HW Random Number Generator support"
- depends on TCG_TPM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator in the Trusted Platform Module
-
- To compile this driver as a module, choose M here: the
- module will be called tpm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_HISI
tristate "Hisilicon Random Number Generator support"
depends on HW_RANDOM && ARCH_HISI
@@ -449,6 +424,18 @@ config HW_RANDOM_S390
If unsure, say Y.
+config HW_RANDOM_EXYNOS
+ tristate "Samsung Exynos True Random Number Generator support"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default HW_RANDOM
+ ---help---
+ This driver provides support for the True Random Number
+ Generator available in Exynos SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called exynos-trng.
+
+ If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index f3728d008fff..b780370bd4eb 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -9,11 +9,11 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
-obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
@@ -27,7 +27,6 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
-obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 574211a49549..7a84cec30c3a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/clk.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -29,116 +30,180 @@
#define RNG_INT_OFF 0x1
-static void __init nsp_rng_init(void __iomem *base)
+struct bcm2835_rng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+ bool mask_interrupts;
+ struct clk *clk;
+};
+
+static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
{
- u32 val;
+ return container_of(rng, struct bcm2835_rng_priv, rng);
+}
+
+static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(priv->base + offset);
+ else
+ return readl(priv->base + offset);
+}
- /* mask the interrupt */
- val = readl(base + RNG_INT_MASK);
- val |= RNG_INT_OFF;
- writel(val, base + RNG_INT_MASK);
+static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val,
+ u32 offset)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, priv->base + offset);
+ else
+ writel(val, priv->base + offset);
}
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
- void __iomem *rng_base = (void __iomem *)rng->priv;
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
u32 max_words = max / sizeof(u32);
u32 num_words, count;
- while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
cpu_relax();
}
- num_words = readl(rng_base + RNG_STATUS) >> 24;
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
if (num_words > max_words)
num_words = max_words;
for (count = 0; count < num_words; count++)
- ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+ ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA);
return num_words * sizeof(u32);
}
-static struct hwrng bcm2835_rng_ops = {
- .name = "bcm2835",
- .read = bcm2835_rng_read,
+static int bcm2835_rng_init(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+ int ret = 0;
+ u32 val;
+
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->mask_interrupts) {
+ /* mask the interrupt */
+ val = rng_readl(priv, RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ rng_writel(priv, val, RNG_INT_MASK);
+ }
+
+ /* set warm-up count & enable */
+ rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS);
+ rng_writel(priv, RNG_RBGEN, RNG_CTRL);
+
+ return ret;
+}
+
+static void bcm2835_rng_cleanup(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+
+ /* disable rng hardware */
+ rng_writel(priv, 0, RNG_CTRL);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+}
+
+struct bcm2835_rng_of_data {
+ bool mask_interrupts;
+};
+
+static const struct bcm2835_rng_of_data nsp_rng_of_data = {
+ .mask_interrupts = true,
};
static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm2835-rng"},
- { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
- { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+ { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm6368-rng"},
{},
};
static int bcm2835_rng_probe(struct platform_device *pdev)
{
+ const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- void (*rng_setup)(void __iomem *base);
const struct of_device_id *rng_id;
- void __iomem *rng_base;
+ struct bcm2835_rng_priv *priv;
+ struct resource *r;
int err;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* map peripheral */
- rng_base = of_iomap(np, 0);
- if (!rng_base) {
- dev_err(dev, "failed to remap rng regs");
- return -ENODEV;
- }
- bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ priv->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Clock is optional on most platforms */
+ priv->clk = devm_clk_get(dev, NULL);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = bcm2835_rng_init;
+ priv->rng.read = bcm2835_rng_read;
+ priv->rng.cleanup = bcm2835_rng_cleanup;
rng_id = of_match_node(bcm2835_rng_of_match, np);
- if (!rng_id) {
- iounmap(rng_base);
+ if (!rng_id)
return -EINVAL;
- }
- /* Check for rng init function, execute it */
- rng_setup = rng_id->data;
- if (rng_setup)
- rng_setup(rng_base);
- /* set warm-up count & enable */
- __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
- __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+ /* Check for per-SoC platform data and apply it */
+ of_data = rng_id->data;
+ if (of_data)
+ priv->mask_interrupts = of_data->mask_interrupts;
/* register driver */
- err = hwrng_register(&bcm2835_rng_ops);
- if (err) {
+ err = devm_hwrng_register(dev, &priv->rng);
+ if (err)
dev_err(dev, "hwrng registration failed\n");
- iounmap(rng_base);
- } else
+ else
dev_info(dev, "hwrng registered\n");
return err;
}
-static int bcm2835_rng_remove(struct platform_device *pdev)
-{
- void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv;
-
- /* disable rng hardware */
- __raw_writel(0, rng_base + RNG_CTRL);
-
- /* unregister driver */
- hwrng_unregister(&bcm2835_rng_ops);
- iounmap(rng_base);
-
- return 0;
-}
-
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+static struct platform_device_id bcm2835_rng_devtype[] = {
+ { .name = "bcm2835-rng" },
+ { .name = "bcm63xx-rng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype);
+
static struct platform_driver bcm2835_rng_driver = {
.driver = {
.name = "bcm2835-rng",
.of_match_table = bcm2835_rng_of_match,
},
.probe = bcm2835_rng_probe,
- .remove = bcm2835_rng_remove,
+ .id_table = bcm2835_rng_devtype,
};
module_platform_driver(bcm2835_rng_driver);
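The net effect of this rework is that the driver no longer smuggles its register base through a global hwrng's ->priv field; state lives in a per-device structure and every callback recovers it with container_of(). A minimal sketch of the pattern, with illustrative names:

#include <linux/hw_random.h>
#include <linux/kernel.h>

struct demo_rng_priv {
	struct hwrng rng;	/* embedded, not a pointer */
	void __iomem *base;
};

static inline struct demo_rng_priv *to_demo_priv(struct hwrng *rng)
{
	/* Recover the enclosing structure from the embedded member;
	 * no cast through rng->priv is needed, and several devices
	 * can coexist because nothing is global any more.
	 */
	return container_of(rng, struct demo_rng_priv, rng);
}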
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
deleted file mode 100644
index 5132c9cde50d..000000000000
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Broadcom BCM63xx Random Number Generator support
- *
- * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
- * Copyright (C) 2009, Broadcom Corporation
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/of.h>
-
-#define RNG_CTRL 0x00
-#define RNG_EN (1 << 0)
-
-#define RNG_STAT 0x04
-#define RNG_AVAIL_MASK (0xff000000)
-
-#define RNG_DATA 0x08
-#define RNG_THRES 0x0c
-#define RNG_MASK 0x10
-
-struct bcm63xx_rng_priv {
- struct hwrng rng;
- struct clk *clk;
- void __iomem *regs;
-};
-
-#define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng)
-
-static int bcm63xx_rng_init(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
- int error;
-
- error = clk_prepare_enable(priv->clk);
- if (error)
- return error;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val |= RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- return 0;
-}
-
-static void bcm63xx_rng_cleanup(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val &= ~RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- clk_disable_unprepare(priv->clk);
-}
-
-static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
-}
-
-static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- *data = __raw_readl(priv->regs + RNG_DATA);
-
- return 4;
-}
-
-static int bcm63xx_rng_probe(struct platform_device *pdev)
-{
- struct resource *r;
- int ret;
- struct bcm63xx_rng_priv *priv;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no iomem resource\n");
- return -ENXIO;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->rng.name = pdev->name;
- priv->rng.init = bcm63xx_rng_init;
- priv->rng.cleanup = bcm63xx_rng_cleanup;
- priv->rng.data_present = bcm63xx_rng_data_present;
- priv->rng.data_read = bcm63xx_rng_data_read;
-
- priv->clk = devm_clk_get(&pdev->dev, "ipsec");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "no clock for device: %d\n", ret);
- return ret;
- }
-
- if (!devm_request_mem_region(&pdev->dev, r->start,
- resource_size(r), pdev->name)) {
- dev_err(&pdev->dev, "request mem failed");
- return -EBUSY;
- }
-
- priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
- resource_size(r));
- if (!priv->regs) {
- dev_err(&pdev->dev, "ioremap failed");
- return -ENOMEM;
- }
-
- ret = devm_hwrng_register(&pdev->dev, &priv->rng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rng device: %d\n",
- ret);
- return ret;
- }
-
- dev_info(&pdev->dev, "registered RNG driver\n");
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id bcm63xx_rng_of_match[] = {
- { .compatible = "brcm,bcm6368-rng", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
-#endif
-
-static struct platform_driver bcm63xx_rng_driver = {
- .probe = bcm63xx_rng_probe,
- .driver = {
- .name = "bcm63xx-rng",
- .of_match_table = of_match_ptr(bcm63xx_rng_of_match),
- },
-};
-
-module_platform_driver(bcm63xx_rng_driver);
-
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 657b8770b6b9..91bb98c42a1c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -306,6 +306,10 @@ static int enable_best_rng(void)
ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
if (!ret)
cur_rng_set_by_user = 0;
+ } else {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ ret = 0;
}
return ret;
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
new file mode 100644
index 000000000000..1947aed7c044
--- /dev/null
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Exynos TRNGs
+ *
+ * Author: Łukasz Stelmach <l.stelmach@samsung.com>
+ *
+ * Copyright 2017 (c) Samsung Electronics Software, Inc.
+ *
+ * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by
+ * Krzysztof Kozłowski <krzk@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define EXYNOS_TRNG_CLKDIV (0x0)
+
+#define EXYNOS_TRNG_CTRL (0x20)
+#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL (0x30)
+#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
+#define EXYNOS_TRNG_ONLINE_STAT (0x44)
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
+#define EXYNOS_TRNG_FIFO_CTRL (0x50)
+#define EXYNOS_TRNG_FIFO_0 (0x80)
+#define EXYNOS_TRNG_FIFO_1 (0x84)
+#define EXYNOS_TRNG_FIFO_2 (0x88)
+#define EXYNOS_TRNG_FIFO_3 (0x8c)
+#define EXYNOS_TRNG_FIFO_4 (0x90)
+#define EXYNOS_TRNG_FIFO_5 (0x94)
+#define EXYNOS_TRNG_FIFO_6 (0x98)
+#define EXYNOS_TRNG_FIFO_7 (0x9c)
+#define EXYNOS_TRNG_FIFO_LEN (8)
+#define EXYNOS_TRNG_CLOCK_RATE (500000)
+
+
+struct exynos_trng_dev {
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct exynos_trng_dev *trng;
+ int val;
+
+ max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
+
+ trng = (struct exynos_trng_dev *)rng->priv;
+
+ writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
+ val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
+ val == 0, 200, 1000000);
+ if (val < 0)
+ return val;
+
+ memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max);
+
+ return max;
+}
+
+static int exynos_trng_init(struct hwrng *rng)
+{
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+ unsigned long sss_rate;
+ u32 val;
+
+ sss_rate = clk_get_rate(trng->clk);
+
+ /*
+ * For most TRNG circuits the clock frequency of under 500 kHz
+ * is safe.
+ */
+ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
+ if (val > 0x7fff) {
+ dev_err(trng->dev, "clock divider too large: %d\n", val);
+ return -ERANGE;
+ }
+ val = val << 1;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV);
+
+ /* Enable the generator. */
+ val = EXYNOS_TRNG_CTRL_RNGEN;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL);
+
+ /*
+ * Disable post-processing. /dev/hwrng is supposed to deliver
+ * unprocessed data.
+ */
+ writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL);
+
+ return 0;
+}
+
+static int exynos_trng_probe(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return ret;
+
+ trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
+ GFP_KERNEL);
+ if (!trng->rng.name)
+ return ret;
+
+ trng->rng.init = exynos_trng_init;
+ trng->rng.read = exynos_trng_do_read;
+ trng->rng.priv = (unsigned long) trng;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not get runtime PM.\n");
+ goto err_pm_get;
+ }
+
+ trng->clk = devm_clk_get(&pdev->dev, "secss");
+ if (IS_ERR(trng->clk)) {
+ ret = PTR_ERR(trng->clk);
+ dev_err(&pdev->dev, "Could not get clock.\n");
+ goto err_clock;
+ }
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable the clk.\n");
+ goto err_clock;
+ }
+
+ ret = hwrng_register(&trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(trng->clk);
+
+err_clock:
+ pm_runtime_put_sync(&pdev->dev);
+
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int exynos_trng_remove(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&trng->rng);
+ clk_disable_unprepare(trng->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not get runtime PM.\n");
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+ exynos_trng_resume);
+
+static const struct of_device_id exynos_trng_dt_match[] = {
+ {
+ .compatible = "samsung,exynos5250-trng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
+
+static struct platform_driver exynos_trng_driver = {
+ .driver = {
+ .name = "exynos-trng",
+ .pm = &exynos_trng_pm_ops,
+ .of_match_table = exynos_trng_dt_match,
+ },
+ .probe = exynos_trng_probe,
+ .remove = exynos_trng_remove,
+};
+
+module_platform_driver(exynos_trng_driver);
+MODULE_AUTHOR("Łukasz Stelmach");
+MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 88db42d30760..eca87249bcff 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -282,8 +282,7 @@ static int __exit imx_rngc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int imx_rngc_suspend(struct device *dev)
+static int __maybe_unused imx_rngc_suspend(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -292,7 +291,7 @@ static int imx_rngc_suspend(struct device *dev)
return 0;
}
-static int imx_rngc_resume(struct device *dev)
+static int __maybe_unused imx_rngc_resume(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -301,11 +300,7 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops imx_rngc_pm_ops = {
- .suspend = imx_rngc_suspend,
- .resume = imx_rngc_resume,
-};
-#endif
+SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
@@ -316,9 +311,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = "imx_rngc",
-#ifdef CONFIG_PM
.pm = &imx_rngc_pm_ops,
-#endif
.of_match_table = imx_rngc_dt_ids,
},
.remove = __exit_p(imx_rngc_remove),
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 8da7bcf54105..7f99cd52b40e 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -135,6 +135,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
#endif
priv->rng.read = mtk_rng_read;
priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
if (IS_ERR(priv->clk)) {
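The new quality value feeds the hwrng core's entropy accounting: the field estimates entropy per 1024 bits of device output, so 900 claims roughly 0.88 bits of entropy per output bit when the core credits the input pool. A sketch of the assignment in context:

/* .quality is entropy per 1024 bits read from the device; 1024 would
 * assert that every output bit is fully random, 900 is a hedge below
 * that, and 0 (the default) means "credit nothing".
 */
priv->rng.quality = 900;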
diff --git a/drivers/char/hw_random/tpm-rng.c b/drivers/char/hw_random/tpm-rng.c
deleted file mode 100644
index d6d448266f07..000000000000
--- a/drivers/char/hw_random/tpm-rng.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2012 Kent Yoder IBM Corporation
- *
- * HWRNG interfaces to pull RNG data from a TPM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/hw_random.h>
-#include <linux/tpm.h>
-
-#define MODULE_NAME "tpm-rng"
-
-static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- return tpm_get_random(TPM_ANY_NUM, data, max);
-}
-
-static struct hwrng tpm_rng = {
- .name = MODULE_NAME,
- .read = tpm_rng_read,
-};
-
-static int __init rng_init(void)
-{
- return hwrng_register(&tpm_rng);
-}
-module_init(rng_init);
-
-static void __exit rng_exit(void)
-{
- hwrng_unregister(&tpm_rng);
-}
-module_exit(rng_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Kent Yoder <key@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("RNG driver for TPM devices");
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 6edfaa72b98b..7992c870b0a2 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -338,10 +338,10 @@ static int bt_bmc_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+static __poll_t bt_bmc_poll(struct file *file, poll_table *wait)
{
struct bt_bmc *bt_bmc = file_bt_bmc(file);
- unsigned int mask = 0;
+ __poll_t mask = 0;
u8 ctrl;
poll_wait(file, &bt_bmc->queue, wait);
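This is one instance of a tree-wide conversion: poll handlers now return the bitwise __poll_t type so sparse can flag mixups between poll masks and plain integers. A self-contained sketch of the converted shape (the wait queue and readiness test are placeholders):

#include <linux/poll.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* placeholder queue */

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;			/* bitwise type */

	poll_wait(file, &demo_wq, wait);
	if (demo_data_ready())			/* hypothetical helper */
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}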
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 2ffca4232686..a011a7739f5e 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -78,10 +78,10 @@ static void file_receive_handler(struct ipmi_recv_msg *msg,
spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
struct ipmi_file_private *priv = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(file, &priv->wait, wait);
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index ab78b3be7e33..c5112b17d7ea 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -106,7 +106,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
return;
}
- pdev->driver_override = override;
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ override);
+ if (!pdev->driver_override)
+ goto err;
if (type == IPMI_DMI_TYPE_SSIF) {
set_prop_entry(p[pidx++], "i2c-addr", u16, base_addr);
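The kasprintf() here is not cosmetic: the platform core kfree()s driver_override when the device is released, so pointing it at a string literal (as the old assignment effectively did) invites a bogus free. A sketch of the safe pattern, matching the unwind label in the hunk above:

/* driver_override must point at heap memory; platform_device_release()
 * kfree()s it, so a string literal here would corrupt the allocator.
 */
pdev->driver_override = kasprintf(GFP_KERNEL, "%s", override);
if (!pdev->driver_override)
	goto err;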
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index f45732a2cb3e..01fbffb3168e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -84,7 +84,7 @@ static int panic_op_write_handler(const char *val,
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
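The copy above (and its twin in ipmi_watchdog.c further down) fixes the classic strncpy() pitfall: copying the full buffer size leaves no room for the terminator when the source fills the buffer. A self-contained sketch of the corrected idiom:

#include <linux/string.h>

static void demo_copy_param(char *dst, size_t dst_len, const char *src)
{
	/* Copy at most dst_len - 1 bytes; strncpy() does not
	 * NUL-terminate on truncation, so terminate explicitly.
	 */
	strncpy(dst, src, dst_len - 1);
	dst[dst_len - 1] = '\0';
}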
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 07fddbefefe4..bcf493d8e238 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -250,8 +250,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
ipmi->irq = opal_event_request(prop);
}
- if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
- "opal-ipmi", ipmi)) {
+ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+ "opal-ipmi", ipmi);
+ if (rc) {
dev_warn(dev, "Unable to request irq\n");
goto err_dispose;
}
@@ -264,7 +265,6 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
goto err_unregister;
}
- /* todo: query actual ipmi_device_id */
rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
if (rc) {
dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 71fad747c0c7..6768cb2dd740 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1938,8 +1938,10 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->thread != NULL)
+ if (smi_info->thread != NULL) {
kthread_stop(smi_info->thread);
+ smi_info->thread = NULL;
+ }
smi_info->timer_can_start = false;
if (smi_info->timer_running)
@@ -2045,6 +2047,7 @@ static int try_smi_init(struct smi_info *new_smi)
int rv = 0;
int i;
char *init_name = NULL;
+ bool platform_device_registered = false;
pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
@@ -2173,6 +2176,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv);
goto out_err;
}
+ platform_device_registered = true;
}
dev_set_drvdata(new_smi->io.dev, new_smi);
@@ -2279,10 +2283,11 @@ out_err:
}
if (new_smi->pdev) {
- platform_device_unregister(new_smi->pdev);
+ if (platform_device_registered)
+ platform_device_unregister(new_smi->pdev);
+ else
+ platform_device_put(new_smi->pdev);
new_smi->pdev = NULL;
- } else if (new_smi->pdev) {
- platform_device_put(new_smi->pdev);
}
kfree(init_name);
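The removed else-if tested the same condition twice and so was dead code; a device that failed before platform_device_add() leaked. The fix records whether registration succeeded, because teardown differs on either side of that call. A short sketch of the rule, with illustrative names:

struct platform_device *pdev;
int rv;

pdev = platform_device_alloc("demo", 0);	/* reference held */
if (!pdev)
	return -ENOMEM;

rv = platform_device_add(pdev);
if (rv) {
	platform_device_put(pdev);	/* never added: just drop the ref */
	return rv;
}

/* ... device in use ... */

platform_device_unregister(pdev);	/* added: deletes and drops the ref */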
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 9573f1116450..f4214870d726 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
#endif
#ifdef CONFIG_OF
module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the"
" default scan of the interfaces identified via OpenFirmware");
#endif
#ifdef CONFIG_DMI
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 3cfaec728604..f929e72bdac8 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2071,8 +2071,7 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- if (addr_info->client)
- i2c_unregister_device(addr_info->client);
+ i2c_unregister_device(addr_info->client);
list_del(&addr_info->link);
kfree(addr_info);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 76b270678b50..34bc1f3ca414 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -232,7 +232,7 @@ static int set_param_str(const char *val, const struct kernel_param *kp)
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
@@ -298,7 +298,7 @@ module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
module_param(panic_wdt_timeout, timeout, 0644);
-MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds.");
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
@@ -887,9 +887,9 @@ static int ipmi_open(struct inode *ino, struct file *filep)
}
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &read_q, wait);
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 9a1aaf538758..819fe37a3683 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -415,10 +415,10 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
return count;
}
-static unsigned int cm4040_poll(struct file *filp, poll_table *wait)
+static __poll_t cm4040_poll(struct file *filp, poll_table *wait)
{
struct reader_dev *dev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dev->poll_wait, wait);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d256110ba672..7a56d1a13ec3 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -769,10 +769,10 @@ static int pp_release(struct inode *inode, struct file *file)
}
/* No kernel lock held - fine */
-static unsigned int pp_poll(struct file *file, poll_table *wait)
+static __poll_t pp_poll(struct file *file, poll_table *wait)
{
struct pp_struct *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &pp->irq_wait, wait);
if (atomic_read(&pp->irqc))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ec42c8bb9b0d..80f2c326db47 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -431,9 +431,9 @@ static int crng_init = 0;
static int crng_init_cnt = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE]);
+ __u32 out[CHACHA20_BLOCK_WORDS]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -817,7 +817,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u8 block[CHACHA20_BLOCK_SIZE];
+ __u32 block[CHACHA20_BLOCK_WORDS];
__u32 key[8];
} buf;
@@ -851,7 +851,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE])
+ __u32 out[CHACHA20_BLOCK_WORDS])
{
unsigned long v, flags;
@@ -867,7 +867,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
{
struct crng_state *crng = NULL;
@@ -885,7 +885,7 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -897,14 +897,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = &tmp[used / sizeof(__u32)];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
struct crng_state *crng = NULL;
@@ -920,7 +920,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1507,7 +1507,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -1784,10 +1784,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return ret;
}
-static unsigned int
+static __poll_t
random_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
@@ -2114,7 +2114,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
+ extract_crng((__u32 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2144,7 +2144,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
+ extract_crng(batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
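Switching the ChaCha20 output buffers from __u8 to __u32 guarantees word alignment and drops the casts; only the backtrack-protect indexing has to change with the element type. A worked example, assuming used == 32 bytes:

__u32 tmp[CHACHA20_BLOCK_WORDS];

/* Old: s = (__u32 *)&tmp8[32]        -- byte buffer, cast required
 * New: s = &tmp[32 / sizeof(__u32)]  == &tmp[8]
 * Both point 32 bytes into the block; the new form needs no cast and
 * the array is naturally aligned for 32-bit loads.
 */
__u32 *s = &tmp[used / sizeof(__u32)];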
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 5542a438bbd0..c6a317120a55 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -147,7 +147,7 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait);
+static __poll_t rtc_poll(struct file *file, poll_table *wait);
#endif
static void get_rtc_alm_time(struct rtc_time *alm_tm);
@@ -790,7 +790,7 @@ no_irq:
}
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 6aa32679fd58..7f49fa0f41d7 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -321,10 +321,10 @@ scdrv_write(struct file *file, const char __user *buf,
return status;
}
-static unsigned int
+static __poll_t
scdrv_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status = 0;
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
unsigned long flags;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d3a979e25724..fc041c462aa4 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -940,7 +940,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
if (kfifo_len(&sonypi_device.fifo))
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index a30352202f1f..18c81cbe4704 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -26,6 +26,17 @@ menuconfig TCG_TPM
if TCG_TPM
+config HW_RANDOM_TPM
+ bool "TPM HW Random Number Generator support"
+ depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
+ default y
+ ---help---
+ This setting exposes the TPM's Random Number Generator as a hwrng
+ device. This allows the kernel to collect randomness from the TPM at
+ boot, and provides the TPM's randomness via /dev/hwrng.
+
+ If unsure, say Y.
+
config TCG_TIS_CORE
tristate
---help---
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 34b4bcf46f43..acd758381c58 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -6,8 +6,9 @@ obj-$(CONFIG_TCG_TPM) += tpm.o
tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
tpm2-space.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
-tpm-$(CONFIG_OF) += tpm_of.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
+tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
+tpm-$(CONFIG_OF) += tpm_eventlog_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0eca20c5a80c..0a62c19937b6 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -26,8 +26,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/major.h>
+#include <linux/tpm_eventlog.h>
+#include <linux/hw_random.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
DEFINE_IDR(dev_nums_idr);
static DEFINE_MUTEX(idr_lock);
@@ -80,21 +81,26 @@ void tpm_put_ops(struct tpm_chip *chip)
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
- * tpm_chip_find_get() - return tpm_chip for a given chip number
- * @chip_num: id to find
+ * tpm_chip_find_get() - find and reserve a TPM chip
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * The return'd chip has been tpm_try_get_ops'd and must be released via
- * tpm_put_ops
+ * Finds a TPM chip and reserves its class device and operations. The chip must
+ * be released with tpm_chip_put_ops() after use.
+ *
+ * Return:
+ * A reserved &struct tpm_chip instance.
+ * %NULL if a chip is not found.
+ * %NULL if the chip is not available.
*/
-struct tpm_chip *tpm_chip_find_get(int chip_num)
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
{
- struct tpm_chip *chip, *res = NULL;
+ struct tpm_chip *res = NULL;
+ int chip_num = 0;
int chip_prev;
mutex_lock(&idr_lock);
- if (chip_num == TPM_ANY_NUM) {
- chip_num = 0;
+ if (!chip) {
do {
chip_prev = chip_num;
chip = idr_get_next(&dev_nums_idr, &chip_num);
@@ -104,8 +110,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
}
} while (chip_prev != chip_num);
} else {
- chip = idr_find(&dev_nums_idr, chip_num);
- if (chip && !tpm_try_get_ops(chip))
+ if (!tpm_try_get_ops(chip))
res = chip;
}
@@ -387,6 +392,26 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}
+
+static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+
+ return tpm_get_random(chip, data, max);
+}
+
+static int tpm_add_hwrng(struct tpm_chip *chip)
+{
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+ "tpm-rng-%d", chip->dev_num);
+ chip->hwrng.name = chip->hwrng_name;
+ chip->hwrng.read = tpm_hwrng_read;
+ return hwrng_register(&chip->hwrng);
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -419,11 +444,13 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
+ rc = tpm_add_hwrng(chip);
+ if (rc)
+ goto out_ppi;
+
rc = tpm_add_char_device(chip);
- if (rc) {
- tpm_bios_log_teardown(chip);
- return rc;
- }
+ if (rc)
+ goto out_hwrng;
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
@@ -432,6 +459,14 @@ int tpm_chip_register(struct tpm_chip *chip)
}
return 0;
+
+out_hwrng:
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
+out_ppi:
+ tpm_bios_log_teardown(chip);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
@@ -451,6 +486,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
cdev_device_del(&chip->cdevs, &chip->devs);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1d6729be4cd6..76df4fbcf089 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -30,9 +30,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/pm_runtime.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
#define TPM_MAX_ORDINAL 243
#define TSC_MAX_ORDINAL 12
@@ -328,7 +328,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
-static bool tpm_validate_command(struct tpm_chip *chip,
+static int tpm_validate_command(struct tpm_chip *chip,
struct tpm_space *space,
const u8 *cmd,
size_t len)
@@ -340,10 +340,10 @@ static bool tpm_validate_command(struct tpm_chip *chip,
unsigned int nr_handles;
if (len < TPM_HEADER_SIZE)
- return false;
+ return -EINVAL;
if (!space)
- return true;
+ return 0;
if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
cc = be32_to_cpu(header->ordinal);
@@ -352,7 +352,7 @@ static bool tpm_validate_command(struct tpm_chip *chip,
if (i < 0) {
dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
cc);
- return false;
+ return -EOPNOTSUPP;
}
attrs = chip->cc_attrs_tbl[i];
@@ -362,11 +362,11 @@ static bool tpm_validate_command(struct tpm_chip *chip,
goto err_len;
}
- return true;
+ return 0;
err_len:
dev_dbg(&chip->dev,
"%s: insufficient command length %zu", __func__, len);
- return false;
+ return -EINVAL;
}
/**
@@ -391,8 +391,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
unsigned long stop;
bool need_locality;
- if (!tpm_validate_command(chip, space, buf, bufsiz))
- return -EINVAL;
+ rc = tpm_validate_command(chip, space, buf, bufsiz);
+ if (rc == -EINVAL)
+ return rc;
+ /*
+ * If the command is not implemented by the TPM, synthesize a
+ * response with a TPM2_RC_COMMAND_CODE return for user-space.
+ */
+ if (rc == -EOPNOTSUPP) {
+ header->length = cpu_to_be32(sizeof(*header));
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+ return bufsiz;
+ }
if (bufsiz > TPM_BUFSIZE)
bufsiz = TPM_BUFSIZE;
@@ -413,6 +425,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (chip->dev.parent)
pm_runtime_get_sync(chip->dev.parent);
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;
@@ -489,6 +504,9 @@ out:
chip->locality = -1;
}
out_no_locality:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
if (chip->dev.parent)
pm_runtime_put_sync(chip->dev.parent);
@@ -809,19 +827,20 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
}
/**
- * tpm_is_tpm2 - is the chip a TPM2 chip?
- * @chip_num: tpm idx # or ANY
+ * tpm_is_tpm2 - do we have a TPM2 chip?
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * Returns < 0 on error, and 1 or 0 on success depending whether the chip
- * is a TPM2 chip.
+ * Return:
+ * 1 if we have a TPM2 chip.
+ * 0 if we don't have a TPM2 chip.
+ * A negative number for system errors (errno).
*/
-int tpm_is_tpm2(u32 chip_num)
+int tpm_is_tpm2(struct tpm_chip *chip)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
@@ -833,23 +852,19 @@ int tpm_is_tpm2(u32 chip_num)
EXPORT_SYMBOL_GPL(tpm_is_tpm2);
/**
- * tpm_pcr_read - read a pcr value
- * @chip_num: tpm idx # or ANY
- * @pcr_idx: pcr idx to retrieve
- * @res_buf: TPM_PCR value
- * size of res_buf is 20 bytes (or NULL if you don't care)
+ * tpm_pcr_read - read a PCR value from SHA1 bank
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @res_buf: the value of the PCR
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
+int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
@@ -889,25 +904,26 @@ static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash,
}
/**
- * tpm_pcr_extend - extend pcr value with hash
- * @chip_num: tpm idx # or AN&
- * @pcr_idx: pcr idx to extend
- * @hash: hash value used to extend pcr value
+ * tpm_pcr_extend - extend a PCR value in SHA1 bank.
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be extended
+ * @hash: the hash value used to extend the PCR value
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Note: with TPM 2.0, this also extends all PCR banks whose digest size is
+ * known to the crypto subsystem, in order to prevent malicious use of those
+ * banks. In the future we should dynamically determine digest sizes.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
+int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
{
int rc;
- struct tpm_chip *chip;
struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
u32 count = 0;
int i;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1019,82 +1035,29 @@ out:
return rc;
}
-int tpm_send(u32 chip_num, void *cmd, size_t buflen)
+/**
+ * tpm_send - send a TPM command
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @cmd: a TPM command buffer
+ * @buflen: the length of the TPM command buffer
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = tpm_transmit_cmd(chip, NULL, cmd, buflen, 0, 0,
- "attempting tpm_cmd");
+ "attempting to a send a command");
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
- bool check_cancel, bool *canceled)
-{
- u8 status = chip->ops->status(chip);
-
- *canceled = false;
- if ((status & mask) == mask)
- return true;
- if (check_cancel && chip->ops->req_canceled(chip, status)) {
- *canceled = true;
- return true;
- }
- return false;
-}
-
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel)
-{
- unsigned long stop;
- long rc;
- u8 status;
- bool canceled = false;
-
- /* check current status */
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
-
- stop = jiffies + timeout;
-
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -ETIME;
- rc = wait_event_interruptible_timeout(*queue,
- wait_for_tpm_stat_cond(chip, mask, check_cancel,
- &canceled),
- timeout);
- if (rc > 0) {
- if (canceled)
- return -ECANCELED;
- return 0;
- }
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- tpm_msleep(TPM_TIMEOUT);
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
- }
- return -ETIME;
-}
-EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
-
#define TPM_ORD_SAVESTATE 152
#define SAVESTATE_RESULT_SIZE 10
@@ -1187,16 +1150,15 @@ static const struct tpm_input_header tpm_getrandom_header = {
};
/**
- * tpm_get_random() - Get random bytes from the tpm's RNG
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
+ * tpm_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
*
- * Returns < 0 on error and the number of bytes read on success
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
{
- struct tpm_chip *chip;
struct tpm_cmd_t tpm_cmd;
u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA), rlength;
int err, total = 0, retries = 5;
@@ -1205,8 +1167,8 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1248,22 +1210,23 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
EXPORT_SYMBOL_GPL(tpm_get_random);
/**
- * tpm_seal_trusted() - seal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * tpm_seal_trusted() - seal a trusted key payload
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation is in
+ * the keyring subsystem.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_seal_trusted(chip, payload, options);
@@ -1275,21 +1238,23 @@ EXPORT_SYMBOL_GPL(tpm_seal_trusted);
/**
* tpm_unseal_trusted() - unseal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
+ *
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation is in
+ * the keyring subsystem.
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_unseal_trusted(chip, payload, options);
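The thread running through all of these helpers is the same interface change: callers now pass a struct tpm_chip pointer instead of a chip number, and %NULL selects the default chip where TPM_ANY_NUM used to. A hedged caller-side sketch:

#include <linux/tpm.h>

static int demo_seed_from_tpm(void)
{
	u8 buf[32];
	int rc;

	/* NULL picks the default chip, replacing TPM_ANY_NUM */
	rc = tpm_get_random(NULL, buf, sizeof(buf));
	if (rc < 0)
		return rc;	/* no chip found, or transmit error */

	return rc;		/* number of random bytes obtained */
}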
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 528cffbd49d3..f895fba4e20d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
@@ -34,6 +35,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include <linux/tpm_eventlog.h>
#include <crypto/hash_info.h>
#ifdef CONFIG_X86
@@ -93,12 +95,17 @@ enum tpm2_structures {
TPM2_ST_SESSIONS = 0x8002,
};
+/* Indicates from what layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
enum tpm2_return_codes {
TPM2_RC_SUCCESS = 0x0000,
TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
TPM2_RC_HANDLE = 0x008B,
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_COMMAND_CODE = 0x0143,
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
};
@@ -210,6 +217,9 @@ struct tpm_chip {
int dev_num; /* /dev/tpm# */
unsigned long is_open; /* only one allowed */
+ char hwrng_name[64];
+ struct hwrng hwrng;
+
struct mutex tpm_mutex; /* tpm is processing */
unsigned long timeout_a; /* jiffies */
@@ -385,10 +395,6 @@ struct tpm_cmd_t {
tpm_cmd_params params;
} __packed;
-struct tpm2_digest {
- u16 alg_id;
- u8 digest[SHA512_DIGEST_SIZE];
-} __packed;
/* A string buffer type for constructing TPM commands. This is based on the
* ideas of string buffer code in security/keys/trusted.h but is heap based
@@ -512,16 +518,14 @@ int tpm_do_selftest(struct tpm_chip *chip);
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm_pm_suspend(struct device *dev);
int tpm_pm_resume(struct device *dev);
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel);
static inline void tpm_msleep(unsigned int delay_msec)
{
- usleep_range(delay_msec * 1000,
- (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+ usleep_range((delay_msec * 1000) - TPM_TIMEOUT_RANGE_US,
+ delay_msec * 1000);
};
-struct tpm_chip *tpm_chip_find_get(int chip_num);
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
@@ -575,4 +579,34 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
u8 *cmd);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
+
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif
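The tpm_msleep() change flips the requested delay from the lower bound of the sleep window to the upper one, so polling loops no longer systematically overshoot. A worked example, assuming TPM_TIMEOUT_RANGE_US is 300 (its value in this header; not shown in the hunk):

/* tpm_msleep(5):
 *   before: usleep_range(5000, 5300)  -- sleeps at least 5 ms
 *   after:  usleep_range(4700, 5000)  -- sleeps at most 5 ms
 * Over a long command-polling loop the saved slack adds up, which is
 * also the point of the self-test rework below.
 */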
diff --git a/drivers/char/tpm/tpm1_eventlog.c b/drivers/char/tpm/tpm1_eventlog.c
index 9a8605e500b5..add798bd69d0 100644
--- a/drivers/char/tpm/tpm1_eventlog.c
+++ b/drivers/char/tpm/tpm1_eventlog.c
@@ -21,13 +21,14 @@
*/
#include <linux/seq_file.h>
+#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
static const char* tcpa_event_type_strings[] = {
@@ -371,6 +372,10 @@ static int tpm_read_log(struct tpm_chip *chip)
if (rc != -ENODEV)
return rc;
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
return tpm_read_log_of(chip);
}
@@ -388,11 +393,13 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
unsigned int cnt;
+ int log_version;
int rc = 0;
rc = tpm_read_log(chip);
- if (rc)
+ if (rc < 0)
return rc;
+ log_version = rc;
cnt = 0;
chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
@@ -404,7 +411,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
chip->bin_log_seqops.chip = chip;
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
chip->bin_log_seqops.seqops =
&tpm2_binary_b_measurements_seqops;
else
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index f40d20671a78..c17e75348a99 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -849,28 +849,26 @@ static const struct tpm_input_header tpm2_selftest_header = {
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int delay_msec = 20;
+ unsigned int delay_msec = 10;
long duration;
struct tpm2_cmd cmd;
duration = jiffies_to_msecs(
tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- while (duration > 0) {
+ while (1) {
cmd.header.in = tpm2_selftest_header;
cmd.params.selftest_in.full_test = 0;
rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
0, 0, "continue selftest");
- if (rc != TPM2_RC_TESTING)
+ if (rc != TPM2_RC_TESTING || delay_msec >= duration)
break;
- tpm_msleep(delay_msec);
- duration -= delay_msec;
-
- /* wait longer the next round */
+ /* wait longer than before */
delay_msec *= 2;
+ tpm_msleep(delay_msec);
}
return rc;
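With the doubling moved before the sleep, the retry cadence becomes geometric and the loop exits as soon as the accumulated delay passes the ordinal duration. A worked trace, assuming duration == 100 ms and a TPM that keeps answering TPM2_RC_TESTING:

/* attempt 1: delay_msec = 10  < 100 -> double to 20,  sleep 20 ms
 * attempt 2: delay_msec = 20  < 100 -> double to 40,  sleep 40 ms
 * attempt 3: delay_msec = 40  < 100 -> double to 80,  sleep 80 ms
 * attempt 4: delay_msec = 80  < 100 -> double to 160, sleep 160 ms
 * attempt 5: delay_msec = 160 >= 100 -> break and return rc
 */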
diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/tpm2_eventlog.c
index 34a8afa69138..1ce4411292ba 100644
--- a/drivers/char/tpm/tpm2_eventlog.c
+++ b/drivers/char/tpm/tpm2_eventlog.c
@@ -21,9 +21,9 @@
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
/*
* calc_tpm2_event_size() - calculate the event size, where event
diff --git a/drivers/char/tpm/tpm_eventlog.h b/drivers/char/tpm/tpm_eventlog.h
deleted file mode 100644
index 204466cc4d05..000000000000
--- a/drivers/char/tpm/tpm_eventlog.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __TPM_EVENTLOG_H__
-#define __TPM_EVENTLOG_H__
-
-#include <crypto/hash_info.h>
-
-#define TCG_EVENT_NAME_LEN_MAX 255
-#define MAX_TEXT_EVENT 1000 /* Max event string length */
-#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
-#define TPM2_ACTIVE_PCR_BANKS 3
-
-#ifdef CONFIG_PPC64
-#define do_endian_conversion(x) be32_to_cpu(x)
-#else
-#define do_endian_conversion(x) x
-#endif
-
-enum bios_platform_class {
- BIOS_CLIENT = 0x00,
- BIOS_SERVER = 0x01,
-};
-
-struct tcpa_event {
- u32 pcr_index;
- u32 event_type;
- u8 pcr_value[20]; /* SHA1 */
- u32 event_size;
- u8 event_data[0];
-};
-
-enum tcpa_event_types {
- PREBOOT = 0,
- POST_CODE,
- UNUSED,
- NO_ACTION,
- SEPARATOR,
- ACTION,
- EVENT_TAG,
- SCRTM_CONTENTS,
- SCRTM_VERSION,
- CPU_MICROCODE,
- PLATFORM_CONFIG_FLAGS,
- TABLE_OF_DEVICES,
- COMPACT_HASH,
- IPL,
- IPL_PARTITION_DATA,
- NONHOST_CODE,
- NONHOST_CONFIG,
- NONHOST_INFO,
-};
-
-struct tcpa_pc_event {
- u32 event_id;
- u32 event_size;
- u8 event_data[0];
-};
-
-enum tcpa_pc_event_ids {
- SMBIOS = 1,
- BIS_CERT,
- POST_BIOS_ROM,
- ESCD,
- CMOS,
- NVRAM,
- OPTION_ROM_EXEC,
- OPTION_ROM_CONFIG,
- OPTION_ROM_MICROCODE = 10,
- S_CRTM_VERSION,
- S_CRTM_CONTENTS,
- POST_CONTENTS,
- HOST_TABLE_OF_DEVICES,
-};
-
-/* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */
-
-struct tcg_efi_specid_event_algs {
- u16 alg_id;
- u16 digest_size;
-} __packed;
-
-struct tcg_efi_specid_event {
- u8 signature[16];
- u32 platform_class;
- u8 spec_version_minor;
- u8 spec_version_major;
- u8 spec_errata;
- u8 uintnsize;
- u32 num_algs;
- struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS];
- u8 vendor_info_size;
- u8 vendor_info[0];
-} __packed;
-
-struct tcg_pcr_event {
- u32 pcr_idx;
- u32 event_type;
- u8 digest[20];
- u32 event_size;
- u8 event[0];
-} __packed;
-
-struct tcg_event_field {
- u32 event_size;
- u8 event[0];
-} __packed;
-
-struct tcg_pcr_event2 {
- u32 pcr_idx;
- u32 event_type;
- u32 count;
- struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS];
- struct tcg_event_field event;
-} __packed;
-
-extern const struct seq_operations tpm2_binary_b_measurements_seqops;
-
-#if defined(CONFIG_ACPI)
-int tpm_read_log_acpi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_acpi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_OF)
-int tpm_read_log_of(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_of(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-
-int tpm_bios_log_setup(struct tpm_chip *chip);
-void tpm_bios_log_teardown(struct tpm_chip *chip);
-
-#endif
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_eventlog_acpi.c
index 169edf3ce86d..66f19e93c216 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_eventlog_acpi.c
@@ -25,9 +25,9 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
struct acpi_tcpa {
struct acpi_table_header hdr;
@@ -102,7 +102,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
- return 0;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
err:
kfree(log->bios_event_log);
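Editor's note: with this hunk the tpm_read_log_*() helpers return the event log format (or a negative errno) instead of a bare 0. A hedged sketch of how a caller could dispatch on that value; the real dispatch lives in the (unshown) tpm_bios_log_setup() path, tpm2_binary_b_measurements_seqops is the symbol from the old header, and the tpm1_* name is hypothetical:

static const struct seq_operations *pick_seqops(struct tpm_chip *chip)
{
	int rc = tpm_read_log_acpi(chip);

	if (rc < 0)
		return ERR_PTR(rc);

	if (rc == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
		return &tpm2_binary_b_measurements_seqops;

	return &tpm1_bios_measurements_seqops;	/* hypothetical */
}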
diff --git a/drivers/char/tpm/tpm_eventlog_efi.c b/drivers/char/tpm/tpm_eventlog_efi.c
new file mode 100644
index 000000000000..e3f9ffd341d2
--- /dev/null
+++ b/drivers/char/tpm/tpm_eventlog_efi.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Google
+ *
+ * Authors:
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+/* read binary bios log from EFI configuration table */
+int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct tpm_bios_log *log;
+ u32 log_size;
+ u8 tpm_log_version;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ log = &chip->log;
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table !\n");
+ return -ENOMEM;
+ }
+
+ log_size = log_tbl->size;
+ memunmap(log_tbl);
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+ MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table payload!\n");
+ return -ENOMEM;
+ }
+
+ /* allocate space for a copy of the event log */
+ log->bios_event_log = kmalloc(log_size, GFP_KERNEL);
+ if (!log->bios_event_log)
+ goto err_memunmap;
+ memcpy(log->bios_event_log, log_tbl->log, log_size);
+ log->bios_event_log_end = log->bios_event_log + log_size;
+
+ tpm_log_version = log_tbl->version;
+ memunmap(log_tbl);
+ return tpm_log_version;
+
+err_memunmap:
+ memunmap(log_tbl);
+ return -ENOMEM;
+}
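Editor's note: tpm_read_log_efi() above maps the table twice, once at header size to learn the payload length and then again for header plus payload, before copying the log out of the firmware mapping. A hedged, generic sketch of that two-pass pattern; the structure and names are hypothetical:

#include <linux/io.h>
#include <linux/slab.h>

struct blob_hdr {
	u32 size;		/* payload bytes following the header */
	u8 data[];
};

/* Hypothetical sketch: copy a variable-length firmware blob at 'phys'. */
static void *copy_fw_blob(phys_addr_t phys, u32 *out_size)
{
	struct blob_hdr *hdr;
	void *copy;
	u32 size;

	hdr = memremap(phys, sizeof(*hdr), MEMREMAP_WB);	/* pass 1: header */
	if (!hdr)
		return NULL;
	size = hdr->size;
	memunmap(hdr);

	hdr = memremap(phys, sizeof(*hdr) + size, MEMREMAP_WB);	/* pass 2: all */
	if (!hdr)
		return NULL;

	copy = kmemdup(hdr->data, size, GFP_KERNEL);	/* copy out of the mapping */
	memunmap(hdr);
	if (copy)
		*out_size = size;
	return copy;
}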
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_eventlog_of.c
index aadb7f464076..96fd5646f866 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_eventlog_of.c
@@ -17,9 +17,9 @@
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
int tpm_read_log_of(struct tpm_chip *chip)
{
@@ -76,5 +76,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
memcpy(log->bios_event_log, __va(base), size);
- return 0;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
}
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 79d6bbb58e39..c1dd39eaaeeb 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -665,9 +665,9 @@ out_err:
}
static const struct i2c_device_id tpm_tis_i2c_table[] = {
- {"tpm_i2c_infineon", 0},
- {"slb9635tt", 0},
- {"slb9645tt", 1},
+ {"tpm_i2c_infineon"},
+ {"slb9635tt"},
+ {"slb9645tt"},
{},
};
@@ -675,24 +675,9 @@ MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
#ifdef CONFIG_OF
static const struct of_device_id tpm_tis_i2c_of_match[] = {
- {
- .name = "tpm_i2c_infineon",
- .type = "tpm",
- .compatible = "infineon,tpm_i2c_infineon",
- .data = (void *)0
- },
- {
- .name = "slb9635tt",
- .type = "tpm",
- .compatible = "infineon,slb9635tt",
- .data = (void *)0
- },
- {
- .name = "slb9645tt",
- .type = "tpm",
- .compatible = "infineon,slb9645tt",
- .data = (void *)1
- },
+ {.compatible = "infineon,tpm_i2c_infineon"},
+ {.compatible = "infineon,slb9635tt"},
+ {.compatible = "infineon,slb9645tt"},
{},
};
MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
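Editor's note: the hunk above drops the unused .name/.type/.data fields, leaving compatible-only entries, which is enough because this driver never consumed the match data. A hedged sketch of how per-chip data could be carried and recovered again if it were ever needed; all foo_* names are hypothetical:

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-a", .data = (void *)0 },
	{ .compatible = "vendor,foo-b", .data = (void *)1 },
	{ },
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	/* recover the per-entry match data for this compatible */
	unsigned long variant =
		(unsigned long)of_device_get_match_data(&client->dev);

	dev_info(&client->dev, "probing variant %lu\n", variant);
	return 0;
}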
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e2d1055fb814..f08949a5f678 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -133,93 +133,14 @@ static int check_acpi_tpm2(struct device *dev)
}
#endif
-#ifdef CONFIG_X86
-#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
-#define ILB_REMAP_SIZE 0x100
-#define LPC_CNTRL_REG_OFFSET 0x84
-#define LPC_CLKRUN_EN (1 << 2)
-
-static void __iomem *ilb_base_addr;
-
-static inline bool is_bsw(void)
-{
- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
-}
-
-/**
- * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
- */
-static void tpm_platform_begin_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Disable LPC CLKRUN# */
- clkrun_val &= ~LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-
-/**
- * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
- */
-static void tpm_platform_end_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Enable LPC CLKRUN# */
- clkrun_val |= LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-#else
-static inline bool is_bsw(void)
-{
- return false;
-}
-
-static void tpm_platform_begin_xfer(void)
-{
-}
-
-static void tpm_platform_end_xfer(void)
-{
-}
-#endif
-
static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
*result++ = ioread8(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -228,13 +149,9 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
iowrite8(*value++, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -242,12 +159,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread16(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -255,12 +168,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread32(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -268,12 +177,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
iowrite32(value, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -461,11 +366,6 @@ static int __init init_tis(void)
if (rc)
goto err_force;
-#ifdef CONFIG_X86
- if (is_bsw())
- ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
- ILB_REMAP_SIZE);
-#endif
rc = platform_driver_register(&tis_drv);
if (rc)
goto err_platform;
@@ -484,10 +384,6 @@ err_pnp:
err_platform:
if (force_pdev)
platform_device_unregister(force_pdev);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
err_force:
return rc;
}
@@ -497,10 +393,6 @@ static void __exit cleanup_tis(void)
pnp_unregister_driver(&tis_pnp_driver);
platform_driver_unregister(&tis_drv);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
if (force_pdev)
platform_device_unregister(force_pdev);
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index fdde971bc810..183a5f54d875 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -31,6 +31,74 @@
#include "tpm.h"
#include "tpm_tis_core.h"
+/*
+ * Polling delay used when checking for status and burstcount. Per the
+ * ddwg input, both checks are expected to return within a few usecs.
+ */
+#define TPM_POLL_SLEEP 1 /* msec */
+
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_POLL_SLEEP);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
/* Before we attempt to access the TPM we must see that the valid bit is set.
* The specification says that this bit is 0 at reset and remains 0 until the
* 'TPM has gone through its self test and initialization and has established
@@ -164,7 +232,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- tpm_msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_POLL_SLEEP);
} while (time_before(jiffies, stop));
return -EBUSY;
}
@@ -421,19 +489,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
int i, rc;
u32 did_vid;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
if (rc < 0)
- return rc;
+ goto out;
for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
if (vendor_timeout_overrides[i].did_vid != did_vid)
continue;
memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
sizeof(vendor_timeout_overrides[i].timeout_us));
- return true;
+ rc = true;
+ goto out;
}
- return false;
+ rc = false;
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return rc;
}
/*
@@ -653,14 +730,73 @@ void tpm_tis_remove(struct tpm_chip *chip)
u32 interrupt;
int rc;
+ tpm_tis_clkrun_enable(chip, true);
+
rc = tpm_tis_read32(priv, reg, &interrupt);
if (rc < 0)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+
+ tpm_tis_clkrun_enable(chip, false);
+
+ if (priv->ilb_base_addr)
+ iounmap(priv->ilb_base_addr);
}
EXPORT_SYMBOL_GPL(tpm_tis_remove);
+/**
+ * tpm_tis_clkrun_enable() - Keep the CLKRUN protocol disabled for the
+ * entire duration of a single TPM command
+ * @chip: TPM chip to use
+ * @value: 1 - Disable the CLKRUN protocol, so that clocks are free running
+ *         0 - Enable the CLKRUN protocol
+ *
+ * Call this function directly from tpm_tis_remove() in the error or driver
+ * removal path, since chip->ops is set to NULL in tpm_chip_unregister().
+ */
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
+{
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ u32 clkrun_val;
+
+ if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
+ !data->ilb_base_addr)
+ return;
+
+ if (value) {
+ data->clkrun_enabled++;
+ if (data->clkrun_enabled > 1)
+ return;
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Disable LPC CLKRUN# */
+ clkrun_val &= ~LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ } else {
+ data->clkrun_enabled--;
+ if (data->clkrun_enabled)
+ return;
+
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Enable LPC CLKRUN# */
+ clkrun_val |= LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ }
+}
+
static const struct tpm_class_ops tpm_tis = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = tpm_tis_status,
@@ -673,13 +809,17 @@ static const struct tpm_class_ops tpm_tis = {
.req_canceled = tpm_tis_req_canceled,
.request_locality = request_locality,
.relinquish_locality = release_locality,
+ .clk_enable = tpm_tis_clkrun_enable,
};
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
acpi_handle acpi_dev_handle)
{
- u32 vendor, intfcaps, intmask;
+ u32 vendor;
+ u32 intfcaps;
+ u32 intmask;
+ u32 clkrun_val;
u8 rid;
int rc, probe;
struct tpm_chip *chip;
@@ -700,6 +840,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->phy_ops = phy_ops;
dev_set_drvdata(&chip->dev, priv);
+ if (is_bsw()) {
+ priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ ILB_REMAP_SIZE);
+ if (!priv->ilb_base_addr)
+ return -ENOMEM;
+
+ clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
+ /* If CLKRUN# is already disabled on the LPC bus, there is nothing to manage */
+ if (!(clkrun_val & LPC_CLKRUN_EN)) {
+ iounmap(priv->ilb_base_addr);
+ priv->ilb_base_addr = NULL;
+ }
+ }
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
if (wait_startup(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
@@ -790,9 +947,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
}
}
- return tpm_chip_register(chip);
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto out_err;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return 0;
out_err:
+ if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ chip->ops->clk_enable(chip, false);
+
tpm_tis_remove(chip);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_tis_core_init);
@@ -804,22 +972,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* reenable interrupts that device may have lost or
* BIOS/firmware may have disabled
*/
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
if (rc < 0)
- return;
+ goto out;
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
- return;
+ goto out;
intmask |= TPM_INTF_CMD_READY_INT
| TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
| TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return;
}
int tpm_tis_resume(struct device *dev)
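Editor's note: tpm_tis_clkrun_enable() shown above nests, so each command-scope enable bumps a count and only the first and last transitions touch the LPC register. A minimal sketch of that refcounted bracketing pattern; hw_quirk_on/off are hypothetical, and callers are assumed to be serialized, as TPM commands are:

static unsigned int quirk_count;

static void quirk_get(void)
{
	if (quirk_count++ == 0)
		hw_quirk_on();		/* first user: really disable CLKRUN# */
}

static void quirk_put(void)
{
	if (--quirk_count == 0)
		hw_quirk_off();		/* last user: re-enable CLKRUN# */
}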
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6bbac319ff3b..d5c6a2e952b3 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -79,6 +79,11 @@ enum tis_defaults {
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
#define TPM_RID(l) (0x0F04 | ((l) << 12))
+#define LPC_CNTRL_OFFSET 0x84
+#define LPC_CLKRUN_EN (1 << 2)
+#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+#define ILB_REMAP_SIZE 0x100
+
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
};
@@ -89,6 +94,8 @@ struct tpm_tis_data {
int irq;
bool irq_tested;
unsigned int flags;
+ void __iomem *ilb_base_addr;
+ u16 clkrun_enabled;
wait_queue_head_t int_queue;
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
@@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return data->phy_ops->write32(data, addr, value);
}
+static inline bool is_bsw(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT;
+#else
+ return false;
+#endif
+}
+
void tpm_tis_remove(struct tpm_chip *chip);
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
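Editor's note: the new is_bsw() helper compiles away on non-x86 via #ifdef, while its caller also guards with IS_ENABLED(CONFIG_X86). A hedged sketch of the IS_ENABLED() idiom itself: the branch stays visible to the compiler on every config (so it cannot bitrot), yet dead-code elimination drops it from non-x86 builds. apply_x86_quirk() is hypothetical and must at least be declared on all architectures:

static void do_platform_quirk(void)
{
	if (!IS_ENABLED(CONFIG_X86))
		return;			/* compiled out on non-x86 builds */

	apply_x86_quirk();
}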
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 1d877cc9af97..674218b50b13 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -173,10 +173,10 @@ static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
*
* Return: Poll flags
*/
-static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
struct proxy_dev *proxy_dev = filp->private_data;
- unsigned ret;
+ __poll_t ret;
poll_wait(filp, &proxy_dev->wq, wait);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 656e8af95d52..911475d36800 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/freezer.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
@@ -39,6 +40,66 @@ enum status_bits {
VTPM_STATUS_CANCELED = 0x8,
};
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
static u8 vtpm_status(struct tpm_chip *chip)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d1aed2513bd9..813a2e46824d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -982,10 +982,10 @@ error_out:
return ret;
}
-static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
{
struct port *port;
- unsigned int ret;
+ __poll_t ret;
port = filp->private_data;
poll_wait(filp, &port->waitqueue, wait);
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index b6c9cdead7f3..88e1cf475d3f 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1736,10 +1736,10 @@ end:
return pos;
}
-static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
+static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
{
struct xilly_channel *channel = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(filp, &channel->endpoint->ep_wait, wait);
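Editor's note: the three conversions above change only the types: poll methods now return __poll_t and masks are built from EPOLL* bits so sparse can type-check them. A minimal hypothetical poll method after the conversion; struct foo_dev and its fields are assumptions:

static __poll_t foo_poll(struct file *filp, poll_table *wait)
{
	struct foo_dev *dev = filp->private_data;	/* hypothetical */
	__poll_t mask = 0;

	poll_wait(filp, &dev->waitq, wait);

	if (dev->rx_ready)
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable */
	if (dev->tx_room)
		mask |= EPOLLOUT | EPOLLWRNORM;	/* writable */

	return mask;
}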
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c729a88007d0..b3b4ed9b6874 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -269,6 +269,7 @@ config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO
+ select TIMER_OF
config CLKSRC_MPS2
bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
@@ -441,6 +442,13 @@ config MTK_TIMER
help
Support for Mediatek timer driver.
+config SPRD_TIMER
+ bool "Spreadtrum timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF
+ help
+ Enables support for the Spreadtrum timer driver.
+
config SYS_SUPPORTS_SH_MTU2
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 72711f1491e3..d6dec4489d66 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c
index c68630565079..ea00a5e8f95d 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/owl-timer.c
@@ -168,5 +168,6 @@ static int __init owl_timer_init(struct device_node *node)
return 0;
}
-CLOCKSOURCE_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
-CLOCKSOURCE_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 9de47d4d2d9e..43f4d5c4d6fa 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -384,7 +384,7 @@ static int __init tcb_clksrc_init(void)
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
- ((divided_rate + 500000) % 1000000) / 1000);
+ ((divided_rate % 1000000) + 500) / 1000);
if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
/* use apropriate function to read 32 bit counter */
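Editor's note: the old expression rounded the whole rate before taking the remainder, which corrupts the fractional kHz part. A worked example with an assumed divided_rate of 5007300 Hz (5 MHz plus 7.3 kHz):

	old: ((5007300 + 500000) % 1000000) / 1000 = 507300 / 1000 = 507 kHz (wrong)
	new: ((5007300 % 1000000) + 500) / 1000  =   7800 / 1000 =   7 kHz (right)

The whole-MHz part, printed separately as divided_rate / 1000000 = 5, is unaffected. Note the remaining edge case: a remainder of 999700 Hz rounds to 1000 kHz without carrying into the MHz digit.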
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index a31990408153..06ed88a2a8a0 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -24,7 +24,13 @@
#include "timer-of.h"
-static __init void timer_irq_exit(struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_exit - Release the interrupt
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Free the irq resource
+ */
+static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -34,8 +40,24 @@ static __init void timer_irq_exit(struct of_timer_irq *of_irq)
free_irq(of_irq->irq, clkevt);
}
-static __init int timer_irq_init(struct device_node *np,
- struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_init - Request the interrupt
+ * @np: a device tree node pointer
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Get the interrupt number from its DT definition and request it.
+ * The lookup falls back in the following order:
+ *
+ * - Get interrupt number by name
+ * - Get interrupt number by index
+ *
+ * When the interrupt is per CPU, 'request_percpu_irq()' is called,
+ * otherwise 'request_irq()' is used.
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_irq_init(struct device_node *np,
+ struct of_timer_irq *of_irq)
{
int ret;
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -72,15 +94,30 @@ static __init int timer_irq_init(struct device_node *np,
return 0;
}
-static __init void timer_clk_exit(struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_exit - Release the clock resources
+ * @of_clk: an of_timer_clk structure pointer
+ *
+ * Disable the clock and release the refcount held on it
+ */
+static __init void timer_of_clk_exit(struct of_timer_clk *of_clk)
{
of_clk->rate = 0;
clk_disable_unprepare(of_clk->clk);
clk_put(of_clk->clk);
}
-static __init int timer_clk_init(struct device_node *np,
- struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_init - Initialize the clock resources
+ * @np: a device tree node pointer
+ * @of_clk: an of_timer_clk structure pointer
+ *
+ * Get the clock by name or by index, enable it and get the rate
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_clk_init(struct device_node *np,
+ struct of_timer_clk *of_clk)
{
int ret;
@@ -116,19 +153,19 @@ out_clk_put:
goto out;
}
-static __init void timer_base_exit(struct of_timer_base *of_base)
+static __init void timer_of_base_exit(struct of_timer_base *of_base)
{
iounmap(of_base->base);
}
-static __init int timer_base_init(struct device_node *np,
- struct of_timer_base *of_base)
+static __init int timer_of_base_init(struct device_node *np,
+ struct of_timer_base *of_base)
{
- const char *name = of_base->name ? of_base->name : np->full_name;
-
- of_base->base = of_io_request_and_map(np, of_base->index, name);
+ of_base->base = of_base->name ?
+ of_io_request_and_map(np, of_base->index, of_base->name) :
+ of_iomap(np, of_base->index);
- if (IS_ERR(of_base->base)) {
- pr_err("Failed to iomap (%s)\n", name);
- return PTR_ERR(of_base->base);
+ if (IS_ERR_OR_NULL(of_base->base)) {
+ pr_err("Failed to iomap (%s)\n", of_base->name ?: np->name);
+ return of_base->base ? PTR_ERR(of_base->base) : -ENOMEM;
 }
@@ -141,21 +178,21 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
int flags = 0;
if (to->flags & TIMER_OF_BASE) {
- ret = timer_base_init(np, &to->of_base);
+ ret = timer_of_base_init(np, &to->of_base);
if (ret)
goto out_fail;
flags |= TIMER_OF_BASE;
}
if (to->flags & TIMER_OF_CLOCK) {
- ret = timer_clk_init(np, &to->of_clk);
+ ret = timer_of_clk_init(np, &to->of_clk);
if (ret)
goto out_fail;
flags |= TIMER_OF_CLOCK;
}
if (to->flags & TIMER_OF_IRQ) {
- ret = timer_irq_init(np, &to->of_irq);
+ ret = timer_of_irq_init(np, &to->of_irq);
if (ret)
goto out_fail;
flags |= TIMER_OF_IRQ;
@@ -163,17 +200,20 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
if (!to->clkevt.name)
to->clkevt.name = np->name;
+
+ to->np = np;
+
return ret;
out_fail:
if (flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
return ret;
}
@@ -187,11 +227,11 @@ out_fail:
void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (to->flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (to->flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
}
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index 3f708f1be43d..a5478f3e8589 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -33,6 +33,7 @@ struct of_timer_clk {
struct timer_of {
unsigned int flags;
+ struct device_node *np;
struct clock_event_device clkevt;
struct of_timer_base of_base;
struct of_timer_irq of_irq;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
new file mode 100644
index 000000000000..ef9ebeafb3ed
--- /dev/null
+++ b/drivers/clocksource/timer-sprd.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "timer-of.h"
+
+#define TIMER_NAME "sprd_timer"
+
+#define TIMER_LOAD_LO 0x0
+#define TIMER_LOAD_HI 0x4
+#define TIMER_VALUE_LO 0x8
+#define TIMER_VALUE_HI 0xc
+
+#define TIMER_CTL 0x10
+#define TIMER_CTL_PERIOD_MODE BIT(0)
+#define TIMER_CTL_ENABLE BIT(1)
+#define TIMER_CTL_64BIT_WIDTH BIT(16)
+
+#define TIMER_INT 0x14
+#define TIMER_INT_EN BIT(0)
+#define TIMER_INT_RAW_STS BIT(1)
+#define TIMER_INT_MASK_STS BIT(2)
+#define TIMER_INT_CLR BIT(3)
+
+#define TIMER_VALUE_SHDW_LO 0x18
+#define TIMER_VALUE_SHDW_HI 0x1c
+
+#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+
+static void sprd_timer_enable(void __iomem *base, u32 flag)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val |= TIMER_CTL_ENABLE;
+ if (flag & TIMER_CTL_64BIT_WIDTH)
+ val |= TIMER_CTL_64BIT_WIDTH;
+ else
+ val &= ~TIMER_CTL_64BIT_WIDTH;
+
+ if (flag & TIMER_CTL_PERIOD_MODE)
+ val |= TIMER_CTL_PERIOD_MODE;
+ else
+ val &= ~TIMER_CTL_PERIOD_MODE;
+
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_disable(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val &= ~TIMER_CTL_ENABLE;
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_update_counter(void __iomem *base, unsigned long cycles)
+{
+ writel_relaxed(cycles & TIMER_VALUE_LO_MASK, base + TIMER_LOAD_LO);
+ writel_relaxed(0, base + TIMER_LOAD_HI);
+}
+
+static void sprd_timer_enable_interrupt(void __iomem *base)
+{
+ writel_relaxed(TIMER_INT_EN, base + TIMER_INT);
+}
+
+static void sprd_timer_clear_interrupt(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_INT);
+
+ val |= TIMER_INT_CLR;
+ writel_relaxed(val, base + TIMER_INT);
+}
+
+static int sprd_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), cycles);
+ sprd_timer_enable(timer_of_base(to), 0);
+
+ return 0;
+}
+
+static int sprd_timer_set_periodic(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
+ sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static int sprd_timer_shutdown(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ return 0;
+}
+
+static irqreturn_t sprd_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_clear_interrupt(timer_of_base(to));
+
+ if (clockevent_state_oneshot(ce))
+ sprd_timer_disable(timer_of_base(to));
+
+ ce->event_handler(ce);
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = TIMER_NAME,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = sprd_timer_shutdown,
+ .set_state_periodic = sprd_timer_set_periodic,
+ .set_next_event = sprd_timer_set_next_event,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .handler = sprd_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init sprd_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &to);
+ if (ret)
+ return ret;
+
+ sprd_timer_enable_interrupt(timer_of_base(&to));
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ 1, UINT_MAX);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 8f2423789ba9..e5cdc3af684c 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -16,175 +17,318 @@
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/reset.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include "timer-of.h"
#define TIM_CR1 0x00
#define TIM_DIER 0x0c
#define TIM_SR 0x10
#define TIM_EGR 0x14
+#define TIM_CNT 0x24
#define TIM_PSC 0x28
#define TIM_ARR 0x2c
+#define TIM_CCR1 0x34
#define TIM_CR1_CEN BIT(0)
+#define TIM_CR1_UDIS BIT(1)
#define TIM_CR1_OPM BIT(3)
#define TIM_CR1_ARPE BIT(7)
#define TIM_DIER_UIE BIT(0)
+#define TIM_DIER_CC1IE BIT(1)
#define TIM_SR_UIF BIT(0)
#define TIM_EGR_UG BIT(0)
-struct stm32_clock_event_ddata {
- struct clock_event_device evtdev;
- unsigned periodic_top;
- void __iomem *base;
+#define TIM_PSC_MAX USHRT_MAX
+#define TIM_PSC_CLKRATE 10000
+
+struct stm32_timer_private {
+ int bits;
};
-static int stm32_clock_event_shutdown(struct clock_event_device *evtdev)
+/**
+ * stm32_timer_of_bits_set - set accessor helper
+ * @to: a timer_of structure pointer
+ * @bits: the number of bits (16 or 32)
+ *
+ * Accessor helper to set the number of bits in the timer-of private
+ * structure.
+ *
+ */
+static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ struct stm32_timer_private *pd = to->private_data;
- writel_relaxed(0, base + TIM_CR1);
- return 0;
+ pd->bits = bits;
+}
+
+/**
+ * stm32_timer_of_bits_get - get accessor helper
+ * @to: a timer_of structure pointer
+ *
+ * Accessor helper to get the number of bits in the timer-of private
+ * structure.
+ *
+ * Returns an integer corresponding to the number of bits.
+ */
+static int stm32_timer_of_bits_get(struct timer_of *to)
+{
+ struct stm32_timer_private *pd = to->private_data;
+
+ return pd->bits;
+}
+
+static void __iomem *stm32_timer_cnt __read_mostly;
+
+static u64 notrace stm32_read_sched_clock(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
+}
+
+static struct delay_timer stm32_timer_delay;
+
+static unsigned long stm32_read_delay(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
}
-static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+static void stm32_clock_event_disable(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ writel_relaxed(0, timer_of_base(to) + TIM_DIER);
+}
+
+/**
+ * stm32_timer_start - Start the counter without event
+ * @to: a timer_of structure pointer
+ *
+ * Start the timer so that the counter resets and begins incrementing,
+ * while disabling generation of the update event on counter overflow
+ * (TIM_CR1_UDIS). The counter counts up by default.
+ */
+static void stm32_timer_start(struct timer_of *to)
+{
+ writel_relaxed(TIM_CR1_UDIS | TIM_CR1_CEN, timer_of_base(to) + TIM_CR1);
+}
+
+static int stm32_clock_event_shutdown(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_clock_event_disable(to);
- writel_relaxed(data->periodic_top, base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
return 0;
}
static int stm32_clock_event_set_next_event(unsigned long evt,
- struct clock_event_device *evtdev)
+ struct clock_event_device *clkevt)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+ struct timer_of *to = to_timer_of(clkevt);
+ unsigned long now, next;
+
+ next = readl_relaxed(timer_of_base(to) + TIM_CNT) + evt;
+ writel_relaxed(next, timer_of_base(to) + TIM_CCR1);
+ now = readl_relaxed(timer_of_base(to) + TIM_CNT);
+
+ if ((next - now) > evt)
+ return -ETIME;
- writel_relaxed(evt, data->base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_OPM | TIM_CR1_CEN,
- data->base + TIM_CR1);
+ writel_relaxed(TIM_DIER_CC1IE, timer_of_base(to) + TIM_DIER);
+
+ return 0;
+}
+
+static int stm32_clock_event_set_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
+
+ return stm32_clock_event_set_next_event(timer_of_period(to), clkevt);
+}
+
+static int stm32_clock_event_set_oneshot(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
return 0;
}
static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
{
- struct stm32_clock_event_ddata *data = dev_id;
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- writel_relaxed(0, data->base + TIM_SR);
+ if (clockevent_state_periodic(clkevt))
+ stm32_clock_event_set_periodic(clkevt);
+ else
+ stm32_clock_event_shutdown(clkevt);
- data->evtdev.event_handler(&data->evtdev);
+ clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
-static struct stm32_clock_event_ddata clock_event_ddata = {
- .evtdev = {
- .name = "stm32 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_state_shutdown = stm32_clock_event_shutdown,
- .set_state_periodic = stm32_clock_event_set_periodic,
- .set_state_oneshot = stm32_clock_event_shutdown,
- .tick_resume = stm32_clock_event_shutdown,
- .set_next_event = stm32_clock_event_set_next_event,
- .rating = 200,
- },
-};
+/**
+ * stm32_timer_set_width - Sort out the timer width (32/16)
+ * @to: a pointer to a timer-of structure
+ *
+ * Write the 32-bit max value to the ARR register and read it back. If
+ * the timer is 32 bits wide the result is UINT_MAX, otherwise the
+ * 16-bit register truncates it to USHRT_MAX. Record the detected width.
+ */
+static void __init stm32_timer_set_width(struct timer_of *to)
+{
+ u32 width;
+
+ writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
+
+ width = readl_relaxed(timer_of_base(to) + TIM_ARR);
+
+ stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
+}
-static int __init stm32_clockevent_init(struct device_node *np)
+/**
+ * stm32_timer_set_prescaler - Compute and set the prescaler register
+ * @to: a pointer to a timer-of structure
+ *
+ * Depending on the timer width, compute the prescaler so that 16-bit
+ * timers always run at a 10 kHz rate (TIM_PSC_CLKRATE). 32-bit timers
+ * are considered precise and long enough not to need the prescaler.
+ */
+static void __init stm32_timer_set_prescaler(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data = &clock_event_ddata;
- struct clk *clk;
- struct reset_control *rstc;
- unsigned long rate, max_delta;
- int irq, ret, bits, prescaler = 1;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clockevent (%d)\n", ret);
- goto err_clk_get;
+ int prescaler = 1;
+
+ if (stm32_timer_of_bits_get(to) != 32) {
+ prescaler = DIV_ROUND_CLOSEST(timer_of_rate(to),
+ TIM_PSC_CLKRATE);
+ /*
+ * The prescaler register is a u16, so the computed
+ * value can't be greater than TIM_PSC_MAX; cap it in
+ * that case.
+ */
+ prescaler = prescaler < TIM_PSC_MAX ? prescaler : TIM_PSC_MAX;
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clockevent (%d)\n",
- ret);
- goto err_clk_enable;
- }
+ writel_relaxed(prescaler - 1, timer_of_base(to) + TIM_PSC);
+ writel_relaxed(TIM_EGR_UG, timer_of_base(to) + TIM_EGR);
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- rate = clk_get_rate(clk);
+ /* Adjust rate and period given the prescaler value */
+ to->of_clk.rate = DIV_ROUND_CLOSEST(to->of_clk.rate, prescaler);
+ to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
+}
- rstc = of_reset_control_get(np, NULL);
- if (!IS_ERR(rstc)) {
- reset_control_assert(rstc);
- reset_control_deassert(rstc);
+static int __init stm32_clocksource_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
+ const char *name = to->np->full_name;
+
+ /*
+ * This driver allows several timers to be registered and relies
+ * on the generic time framework to select the right one.
+ * However, there is no such mechanism for the sched_clock. Only
+ * the 32-bit timers are of interest as a sched_clock, so if no
+ * 32-bit timer has been registered yet, select this one as the
+ * sched_clock.
+ */
+ if (bits == 32 && !stm32_timer_cnt) {
+
+ /*
+ * Start the counter immediately, as it is used right
+ * after.
+ */
+ stm32_timer_start(to);
+
+ stm32_timer_cnt = timer_of_base(to) + TIM_CNT;
+ sched_clock_register(stm32_read_sched_clock, bits, timer_of_rate(to));
+ pr_info("%s: STM32 sched_clock registered\n", name);
+
+ stm32_timer_delay.read_current_timer = stm32_read_delay;
+ stm32_timer_delay.freq = timer_of_rate(to);
+ register_current_timer_delay(&stm32_timer_delay);
+ pr_info("%s: STM32 delay timer registered\n", name);
}
- data->base = of_iomap(np, 0);
- if (!data->base) {
- ret = -ENXIO;
- pr_err("failed to map registers for clockevent\n");
- goto err_iomap;
- }
+ return clocksource_mmio_init(timer_of_base(to) + TIM_CNT, name,
+ timer_of_rate(to), bits == 32 ? 250 : 100,
+ bits, clocksource_mmio_readl_up);
+}
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
- pr_err("%pOF: failed to get irq.\n", np);
- goto err_get_irq;
- }
+static void __init stm32_clockevent_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
- /* Detect whether the timer is 16 or 32 bits */
- writel_relaxed(~0U, data->base + TIM_ARR);
- max_delta = readl_relaxed(data->base + TIM_ARR);
- if (max_delta == ~0U) {
- prescaler = 1;
- bits = 32;
- } else {
- prescaler = 1024;
- bits = 16;
- }
- writel_relaxed(0, data->base + TIM_ARR);
+ to->clkevt.name = to->np->full_name;
+ to->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to->clkevt.set_state_shutdown = stm32_clock_event_shutdown;
+ to->clkevt.set_state_periodic = stm32_clock_event_set_periodic;
+ to->clkevt.set_state_oneshot = stm32_clock_event_set_oneshot;
+ to->clkevt.tick_resume = stm32_clock_event_shutdown;
+ to->clkevt.set_next_event = stm32_clock_event_set_next_event;
+ to->clkevt.rating = bits == 32 ? 250 : 100;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 0x1,
+ (1 << bits) - 1);
+
+ pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
+ to->np, bits);
+}
+
+static int __init stm32_timer_init(struct device_node *node)
+{
+ struct reset_control *rstc;
+ struct timer_of *to;
+ int ret;
+
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- writel_relaxed(prescaler - 1, data->base + TIM_PSC);
- writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
- writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
- writel_relaxed(0, data->base + TIM_SR);
+ to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
+ to->of_irq.handler = stm32_clock_event_handler;
- data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
+ ret = timer_of_init(node, to);
+ if (ret)
+ goto err;
- clockevents_config_and_register(&data->evtdev,
- DIV_ROUND_CLOSEST(rate, prescaler),
- 0x1, max_delta);
+ to->private_data = kzalloc(sizeof(struct stm32_timer_private),
+ GFP_KERNEL);
+ if (!to->private_data)
+ goto deinit;
- ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
- "stm32 clockevent", data);
- if (ret) {
- pr_err("%pOF: failed to request irq.\n", np);
- goto err_get_irq;
+ rstc = of_reset_control_get(node, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
}
- pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
- np, bits);
+ stm32_timer_set_width(to);
- return ret;
+ stm32_timer_set_prescaler(to);
+
+ ret = stm32_clocksource_init(to);
+ if (ret)
+ goto deinit;
+
+ stm32_clockevent_init(to);
+ return 0;
-err_get_irq:
- iounmap(data->base);
-err_iomap:
- clk_disable_unprepare(clk);
-err_clk_enable:
- clk_put(clk);
-err_clk_get:
+deinit:
+ timer_of_cleanup(to);
+err:
+ kfree(to);
return ret;
}
-TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
+TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_timer_init);
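Editor's note: stm32_clock_event_set_next_event() above re-reads the counter after programming the compare register; thanks to unsigned wraparound a single comparison detects whether the free-running counter already raced past the match value. A small self-contained sketch of that check, with hypothetical names:

#include <stdint.h>

/*
 * 'next' was programmed as now0 + evt. While the counter has not yet
 * passed 'next', next - now <= evt holds; once it has, the unsigned
 * difference wraps around to a value larger than evt.
 */
static int raced_past(uint32_t next, uint32_t now, uint32_t evt)
{
	return (uint32_t)(next - now) > evt;	/* 1 = event already missed */
}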
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index bdce4488ded1..3a88e33b0cfe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,29 @@
# ARM CPU Frequency scaling drivers
#
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPI v5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ARM_ARMADA_37XX_CPUFREQ
+ tristate "Armada 37xx CPUFreq support"
+ depends on ARCH_MVEBU
+ help
+ This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+ The Armada 37xx PMU supports 4 frequency and VDD levels.
+
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
@@ -12,6 +35,30 @@ config ARM_BIG_LITTLE_CPUFREQ
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
+ help
+ This enables probing via DT for Generic CPUfreq driver for ARM
+ big.LITTLE platform. This gets frequency tables from DT.
+
+config ARM_SCPI_CPUFREQ
+ tristate "SCPI based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+ help
+ This adds the CPUfreq driver support for ARM big.LITTLE platforms
+ using SCPI protocol for CPU power management.
+
+ This driver uses SCPI Message Protocol driver to interact with the
+ firmware providing the CPU DVFS functionality.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on ARCH_BRCMSTB || COMPILE_TEST
@@ -33,20 +80,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
If in doubt, say N.
-config ARM_DT_BL_CPUFREQ
- tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && OF
- help
- This enables probing via DT for Generic CPUfreq driver for ARM
- big.LITTLE platform. This gets frequency tables from DT.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
- tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
- help
- This add the CPUfreq driver support for Versatile Express
- big.LITTLE platforms using SPC for power management.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
@@ -205,16 +238,6 @@ config ARM_SA1100_CPUFREQ
config ARM_SA1110_CPUFREQ
bool
-config ARM_SCPI_CPUFREQ
- tristate "SCPI based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
- help
- This adds the CPUfreq driver support for ARM big.LITTLE platforms
- using SCPI protocol for CPU power management.
-
- This driver uses SCPI Message Protocol driver to interact with the
- firmware providing the CPU DVFS functionality.
-
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
@@ -275,20 +298,3 @@ config ARM_PXA2xx_CPUFREQ
This add the CPUFreq driver support for Intel PXA2xx SOCs.
If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- default n
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 812f9e0d01a3..e07715ce8844 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,23 +52,26 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
+obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
@@ -81,8 +84,6 @@ obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
-obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
-obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 65ec5f01aa8d..c56b57dcfda5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -526,34 +526,13 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int cur_cluster = cpu_to_cluster(policy->cpu);
- struct device_node *np;
/* Do not register a cpu_cooling device if we are in IKS mode */
if (cur_cluster >= MAX_CLUSTERS)
return;
- np = of_node_get(cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(cdev[cur_cluster])) {
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev[cur_cluster]));
- cdev[cur_cluster] = NULL;
- }
- }
- of_node_put(np);
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver bL_cpufreq_driver = {
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
new file mode 100644
index 000000000000..c6ebc88a7d8d
--- /dev/null
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPU frequency scaling support for Armada 37xx platform.
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Power management in North Bridge register set */
+#define ARMADA_37XX_NB_L0L1 0x18
+#define ARMADA_37XX_NB_L2L3 0x1C
+#define ARMADA_37XX_NB_TBG_DIV_OFF 13
+#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
+#define ARMADA_37XX_NB_CLK_SEL_OFF 11
+#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
+#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1
+#define ARMADA_37XX_NB_TBG_SEL_OFF 9
+#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
+#define ARMADA_37XX_NB_VDD_SEL_OFF 6
+#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3
+#define ARMADA_37XX_NB_CONFIG_SHIFT 16
+#define ARMADA_37XX_NB_DYN_MOD 0x24
+#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26)
+#define ARMADA_37XX_NB_TBG_EN BIT(28)
+#define ARMADA_37XX_NB_DIV_EN BIT(29)
+#define ARMADA_37XX_NB_VDD_EN BIT(30)
+#define ARMADA_37XX_NB_DFS_EN BIT(31)
+#define ARMADA_37XX_NB_CPU_LOAD 0x30
+#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
+#define ARMADA_37XX_DVFS_LOAD_0 0
+#define ARMADA_37XX_DVFS_LOAD_1 1
+#define ARMADA_37XX_DVFS_LOAD_2 2
+#define ARMADA_37XX_DVFS_LOAD_3 3
+
+/*
+ * On Armada 37xx, the power management unit manages 4 CPU load
+ * levels; each level can be associated with a CPU clock source, a
+ * CPU divider, a VDD level, etc.
+ */
+#define LOAD_LEVEL_NR 4
+
+struct armada_37xx_dvfs {
+ u32 cpu_freq_max;
+ u8 divider[LOAD_LEVEL_NR];
+};
+
+static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
+ {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
+ {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
+};
+
+static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
+ if (freq == armada_37xx_dvfs[i].cpu_freq_max)
+ return &armada_37xx_dvfs[i];
+ }
+
+ pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
+ return NULL;
+}
+
+/*
+ * Set up the four load levels managed by the hardware. Once the four
+ * levels are configured, DVFS can be enabled.
+ */
+static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ struct clk *clk, u8 *divider)
+{
+ int load_lvl;
+ struct clk *parent;
+
+ for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ unsigned int reg, mask, val, offset = 0;
+
+ if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
+ reg = ARMADA_37XX_NB_L0L1;
+ else
+ reg = ARMADA_37XX_NB_L2L3;
+
+ if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
+ load_lvl == ARMADA_37XX_DVFS_LOAD_2)
+ offset += ARMADA_37XX_NB_CONFIG_SHIFT;
+
+ /* Set the cpu clock source; TBG is used for all load levels */
+ val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
+ mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ << ARMADA_37XX_NB_CLK_SEL_OFF);
+
+ /*
+ * Set the cpu divider based on the pre-computed array
+ * in order to have balanced steps.
+ */
+ val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
+ mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
+ << ARMADA_37XX_NB_TBG_DIV_OFF);
+
+ /* Set VDD divider which is actually the load level. */
+ val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
+ mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
+ << ARMADA_37XX_NB_VDD_SEL_OFF);
+
+ val <<= offset;
+ mask <<= offset;
+
+ regmap_update_bits(base, reg, mask, val);
+ }
+
+ /*
+ * Set the cpu clock source: for all the levels, keep the clock
+ * source that is already configured. The clock framework has to
+ * be used for this.
+ */
+ parent = clk_get_parent(clk);
+ clk_set_parent(clk, parent);
+}
+
+static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+{
+ unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
+ mask = ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, 0);
+}
+
+static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
+{
+ unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
+ mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ /* Start with the highest load (0) */
+ val = ARMADA_37XX_DVFS_LOAD_0;
+ regmap_update_bits(base, reg, mask, val);
+
+ /* Now enable DVFS for the CPUs */
+ reg = ARMADA_37XX_NB_DYN_MOD;
+ mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
+ ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
+ ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, mask);
+}
+
+static int __init armada37xx_cpufreq_driver_init(void)
+{
+ struct armada_37xx_dvfs *dvfs;
+ struct platform_device *pdev;
+ unsigned int cur_frequency;
+ struct regmap *nb_pm_base;
+ struct device *cpu_dev;
+ int load_lvl, ret;
+ struct clk *clk;
+
+ nb_pm_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+
+ if (IS_ERR(nb_pm_base))
+ return -ENODEV;
+
+ /* Before doing any DVFS configuration, first disable DVFS */
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+
+ /*
+ * For CPU 0, register the supported operating points (the
+ * nominal CPU frequency and integer divisions of it).
+ */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ dev_err(cpu_dev, "Cannot get CPU\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ /* Get nominal (current) CPU frequency */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
+ return -EINVAL;
+ }
+
+ dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+ if (!dvfs)
+ return -EINVAL;
+
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+
+ for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+ load_lvl++) {
+ unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret) {
+ /* Clean up the already-added OPPs before leaving */
+ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+ freq = cur_frequency / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(cpu_dev, freq);
+ }
+ return ret;
+ }
+ }
+
+ /* Now that everything is set up, enable DVFS at the hardware level */
+ armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+/* late_initcall, to guarantee the driver is loaded after the A37xx clock driver */
+late_initcall(armada37xx_cpufreq_driver_init);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index ecc56e26f8f6..3b585e4bfac5 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "marvell,armadaxp", },
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
{ .compatible = "nvidia,tegra124", },
{ .compatible = "st,stih407", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 545946ad0752..de3d104c25d7 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -319,33 +319,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
static void cpufreq_ready(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- struct device_node *np = of_node_get(priv->cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- priv->cdev = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(priv->cdev)) {
- dev_err(priv->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(priv->cdev));
-
- priv->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ priv->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver dt_cpufreq_driver = {
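The one-line ->ready() above relies on of_cpufreq_cooling_register() now performing the "#cooling-cells" check, power-coefficient lookup and error handling internally; the same simplification recurs in the mediatek and qoriq hunks below. A hedged sketch of the resulting driver shape (example_priv is hypothetical):

static void example_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct example_priv *priv = policy->driver_data;

	/* May return an ERR_PTR; callers here store it as-is, as in the hunks above. */
	priv->cdev = of_cpufreq_cooling_register(policy);
}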
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 41d148af7748..421f318c0e66 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -601,19 +601,18 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
/**
* cpufreq_parse_governor - parse a governor string
*/
-static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
- struct cpufreq_governor **governor)
+static int cpufreq_parse_governor(char *str_governor,
+ struct cpufreq_policy *policy)
{
- int err = -EINVAL;
-
if (cpufreq_driver->setpolicy) {
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_PERFORMANCE;
- err = 0;
- } else if (!strncasecmp(str_governor, "powersave",
- CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_POWERSAVE;
- err = 0;
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
}
} else {
struct cpufreq_governor *t;
@@ -621,26 +620,31 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
-
- if (t == NULL) {
+ if (!t) {
int ret;
mutex_unlock(&cpufreq_governor_mutex);
+
ret = request_module("cpufreq_%s", str_governor);
- mutex_lock(&cpufreq_governor_mutex);
+ if (ret)
+ return -EINVAL;
- if (ret == 0)
- t = find_governor(str_governor);
- }
+ mutex_lock(&cpufreq_governor_mutex);
- if (t != NULL) {
- *governor = t;
- err = 0;
+ t = find_governor(str_governor);
}
+ if (t && !try_module_get(t->owner))
+ t = NULL;
mutex_unlock(&cpufreq_governor_mutex);
+
+ if (t) {
+ policy->governor = t;
+ return 0;
+ }
}
- return err;
+
+ return -EINVAL;
}
/**
@@ -760,11 +764,14 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy.policy,
- &new_policy.governor))
+ if (cpufreq_parse_governor(str_governor, &new_policy))
return -EINVAL;
ret = cpufreq_set_policy(policy, &new_policy);
+
+ if (new_policy.governor)
+ module_put(new_policy.governor->owner);
+
return ret ? ret : count;
}
@@ -1044,8 +1051,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (policy->last_policy)
new_policy.policy = policy->last_policy;
else
- cpufreq_parse_governor(gov->name, &new_policy.policy,
- NULL);
+ cpufreq_parse_governor(gov->name, &new_policy);
}
/* set default policy */
return cpufreq_set_policy(policy, &new_policy);
@@ -2160,7 +2166,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
mutex_unlock(&cpufreq_governor_mutex);
- return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
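The parse rewrite above pins the governor module for the duration of a policy update; store_scaling_governor() drops the reference once cpufreq_set_policy() returns. A hedged sketch of the lookup-and-pin step in isolation (uses the file's own find_governor() and cpufreq_governor_mutex):

static struct cpufreq_governor *example_get_governor(const char *name)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(name);
	/* try_module_get() fails if the module is mid-unload. */
	if (t && !try_module_get(t->owner))
		t = NULL;
	mutex_unlock(&cpufreq_governor_mutex);

	return t;	/* caller must module_put(t->owner) when done */
}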
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1e55b5790853..1572129844a5 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -27,7 +27,7 @@ struct cpufreq_stats {
unsigned int *trans_table;
};
-static int cpufreq_stats_update(struct cpufreq_stats *stats)
+static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
@@ -35,7 +35,6 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
- return 0;
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index d9b2c2de49c4..741f22e5cee3 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -25,15 +25,29 @@ static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;
-static struct clk *arm_clk;
-static struct clk *pll1_sys_clk;
-static struct clk *pll1_sw_clk;
-static struct clk *step_clk;
-static struct clk *pll2_pfd2_396m_clk;
-
-/* clk used by i.MX6UL */
-static struct clk *pll2_bus_clk;
-static struct clk *secondary_sel_clk;
+enum IMX6_CPUFREQ_CLKS {
+ ARM,
+ PLL1_SYS,
+ STEP,
+ PLL1_SW,
+ PLL2_PFD2_396M,
+ /* MX6UL requires two more clks */
+ PLL2_BUS,
+ SECONDARY_SEL,
+};
+#define IMX6Q_CPUFREQ_CLK_NUM 5
+#define IMX6UL_CPUFREQ_CLK_NUM 7
+
+static int num_clks;
+static struct clk_bulk_data clks[] = {
+ { .id = "arm" },
+ { .id = "pll1_sys" },
+ { .id = "step" },
+ { .id = "pll1_sw" },
+ { .id = "pll2_pfd2_396m" },
+ { .id = "pll2_bus" },
+ { .id = "secondary_sel" },
+};
static struct device *cpu_dev;
static bool free_opp;
@@ -53,7 +67,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = freq_table[index].frequency;
freq_hz = new_freq * 1000;
- old_freq = clk_get_rate(arm_clk) / 1000;
+ old_freq = clk_get_rate(clks[ARM].clk) / 1000;
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
@@ -112,29 +126,35 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
* voltage of 528MHz, so lower the CPU frequency to one
* half before changing CPU frequency.
*/
- clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
- clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+ clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_BUS].clk);
else
- clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
- clk_set_parent(step_clk, secondary_sel_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ }
} else {
- clk_set_parent(step_clk, pll2_pfd2_396m_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, new_freq * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
} else {
/* pll1_sys needs to be enabled for divider rate change to work. */
pll1_sys_temp_enabled = true;
- clk_prepare_enable(pll1_sys_clk);
+ clk_prepare_enable(clks[PLL1_SYS].clk);
}
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, new_freq * 1000);
+ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
@@ -143,7 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* PLL1 is only needed until after ARM-PODF is set. */
if (pll1_sys_temp_enabled)
- clk_disable_unprepare(pll1_sys_clk);
+ clk_disable_unprepare(clks[PLL1_SYS].clk);
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
@@ -174,7 +194,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- policy->clk = arm_clk;
+ policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = policy->max;
@@ -244,6 +264,43 @@ put_node:
of_node_put(np);
}
+#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
+
+static void imx6ul_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+ if (!np)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ goto put_node;
+ }
+
+ /*
+ * Speed GRADING[1:0] defines the max speed of ARM:
+ * 2b'00: Reserved;
+ * 2b'01: 528000000Hz;
+ * 2b'10: 696000000Hz;
+ * 2b'11: Reserved;
+ * We need to set the max speed of ARM according to the fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+ if (dev_pm_opp_disable(dev, 696000000))
+ dev_warn(dev, "failed to disable 696MHz OPP\n");
+ iounmap(base);
+put_node:
+ of_node_put(np);
+}
+
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -266,28 +323,15 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}
- arm_clk = clk_get(cpu_dev, "arm");
- pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
- pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
- step_clk = clk_get(cpu_dev, "step");
- pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
- if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
- IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
- dev_err(cpu_dev, "failed to get clocks\n");
- ret = -ENOENT;
- goto put_clk;
- }
-
if (of_machine_is_compatible("fsl,imx6ul") ||
- of_machine_is_compatible("fsl,imx6ull")) {
- pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
- secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
- if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
- dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
- ret = -ENOENT;
- goto put_clk;
- }
- }
+ of_machine_is_compatible("fsl,imx6ull"))
+ num_clks = IMX6UL_CPUFREQ_CLK_NUM;
+ else
+ num_clks = IMX6Q_CPUFREQ_CLK_NUM;
+
+ ret = clk_bulk_get(cpu_dev, num_clks, clks);
+ if (ret)
+ goto put_node;
arm_reg = regulator_get(cpu_dev, "arm");
pu_reg = regulator_get_optional(cpu_dev, "pu");
@@ -311,7 +355,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- imx6q_opp_check_speed_grading(cpu_dev);
+ if (of_machine_is_compatible("fsl,imx6ul"))
+ imx6ul_opp_check_speed_grading(cpu_dev);
+ else
+ imx6q_opp_check_speed_grading(cpu_dev);
/* Because we have added the OPPs here, we must free them */
free_opp = true;
@@ -424,22 +471,11 @@ put_reg:
regulator_put(pu_reg);
if (!IS_ERR(soc_reg))
regulator_put(soc_reg);
-put_clk:
- if (!IS_ERR(arm_clk))
- clk_put(arm_clk);
- if (!IS_ERR(pll1_sys_clk))
- clk_put(pll1_sys_clk);
- if (!IS_ERR(pll1_sw_clk))
- clk_put(pll1_sw_clk);
- if (!IS_ERR(step_clk))
- clk_put(step_clk);
- if (!IS_ERR(pll2_pfd2_396m_clk))
- clk_put(pll2_pfd2_396m_clk);
- if (!IS_ERR(pll2_bus_clk))
- clk_put(pll2_bus_clk);
- if (!IS_ERR(secondary_sel_clk))
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
+put_node:
of_node_put(np);
+
return ret;
}
@@ -453,13 +489,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
regulator_put(soc_reg);
- clk_put(arm_clk);
- clk_put(pll1_sys_clk);
- clk_put(pll1_sw_clk);
- clk_put(step_clk);
- clk_put(pll2_pfd2_396m_clk);
- clk_put(pll2_bus_clk);
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
return 0;
}
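The conversion above swaps seven clk_get()/clk_put() pairs for a single clk_bulk_get()/clk_bulk_put(). A minimal hedged sketch of the pattern (clock names illustrative):

static struct clk_bulk_data example_clks[] = {
	{ .id = "arm" },
	{ .id = "pll1_sys" },
};

static int example_probe(struct device *dev)
{
	int ret;

	/* All-or-nothing: on failure nothing is held, so no partial cleanup. */
	ret = clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
	if (ret)
		return ret;

	clk_set_rate(example_clks[0].clk, 792000000);

	clk_bulk_put(ARRAY_SIZE(example_clks), example_clks);
	return 0;
}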
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a0e88bef76..7edf7a0e5a96 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1595,15 +1595,6 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-static const struct pstate_funcs bxt_funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = core_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
-};
-
#define ICPU(model, policy) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
(unsigned long)&policy }
@@ -1627,8 +1618,9 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
+ ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c46a12df40dd..5faa37c5b091 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
longhaul_setup_voltagescaling();
- policy->cpuinfo.transition_latency = 200000; /* nsec */
+ policy->transition_delay_us = 200000; /* usec */
return cpufreq_table_validate_and_show(policy, longhaul_table);
}
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index e0d5090b303d..8c04dddd3c28 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -310,28 +310,8 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
- struct device_node *np = of_node_get(info->cpu_dev->of_node);
- u32 capacitance = 0;
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
-
- info->cdev = of_cpufreq_power_cooling_register(np,
- policy, capacitance, NULL);
-
- if (IS_ERR(info->cdev)) {
- dev_err(info->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(info->cdev));
-
- info->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ info->cdev = of_cpufreq_cooling_register(policy);
}
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
@@ -574,6 +554,7 @@ static struct platform_driver mtk_cpufreq_platdrv = {
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
{ .compatible = "mediatek,mt7622", },
{ .compatible = "mediatek,mt7623", },
{ .compatible = "mediatek,mt817x", },
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index ed915ee85dd9..31513bd42705 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -76,12 +76,6 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
return PTR_ERR(clk);
}
- /*
- * In case of a failure of dev_pm_opp_add(), we don't
- * bother with cleaning up the registered OPP (there's
- * no function to do so), and simply cancel the
- * registration of the cpufreq device.
- */
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
@@ -91,7 +85,8 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
clk_put(clk);
- return ret;
+ dev_err(cpu_dev, "Failed to register OPPs\n");
+ goto opp_register_failed;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
@@ -99,9 +94,16 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ clk_put(clk);
}
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
+
+opp_register_failed:
+ /* Registration failed: remove all OPPs for all CPUs */
+ dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
+
+ return ret;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
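The new error path restates a common OPP rollback idiom; as a hedged fragment: if any dev_pm_opp_add() fails, every OPP registered so far is dropped for all CPUs rather than leaving a half-populated table behind.

	ret = dev_pm_opp_add(cpu_dev, rate, 0);
	if (ret) {
		dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
		return ret;
	}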
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index b6d7c4c98d0a..29cdec198657 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -29,6 +29,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/hashtable.h>
#include <trace/events/power.h>
#include <asm/cputhreads.h>
@@ -38,14 +39,13 @@
#include <asm/opal.h>
#include <linux/timer.h>
-#define POWERNV_MAX_PSTATES 256
+#define POWERNV_MAX_PSTATES_ORDER 8
+#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
-#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
-#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
-#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -94,6 +94,27 @@ struct global_pstate_info {
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+/**
+ * struct pstate_idx_revmap_data: Entry in the pstate_revmap hashtable,
+ * indexed by a function of the pstate id.
+ *
+ * @pstate_id: pstate id for this entry.
+ *
+ * @cpufreq_table_idx: Index into the powernv_freqs
+ * cpufreq_frequency_table for frequency
+ * corresponding to pstate_id.
+ *
+ * @hentry: hlist_node that hooks this entry into the pstate_revmap
+ * hashtable
+ */
+struct pstate_idx_revmap_data {
+ u8 pstate_id;
+ unsigned int cpufreq_table_idx;
+ struct hlist_node hentry;
+};
+
static bool rebooting, throttled, occ_reset;
static const char * const throttle_reason[] = {
@@ -148,39 +169,56 @@ static struct powernv_pstate_info {
bool wof_enabled;
} powernv_pstate_info;
-/* Use following macros for conversions between pstate_id and index */
-static inline int idx_to_pstate(unsigned int i)
+static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
+{
+ return ((pmsr_val >> shift) & 0xFF);
+}
+
+#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
+#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
+#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
+
+/* Use the following functions for conversions between pstate_id and index */
+
+/**
+ * idx_to_pstate : Returns the pstate id corresponding to the
+ * frequency in the cpufreq frequency table
+ * powernv_freqs indexed by @i.
+ *
+ * If @i is out of bounds, this will return the pstate
+ * corresponding to the nominal frequency.
+ */
+static inline u8 idx_to_pstate(unsigned int i)
{
if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
- pr_warn_once("index %u is out of bound\n", i);
+ pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
return powernv_freqs[powernv_pstate_info.nominal].driver_data;
}
return powernv_freqs[i].driver_data;
}
-static inline unsigned int pstate_to_idx(int pstate)
+/**
+ * pstate_to_idx : Returns the index in the cpufreq frequency table
+ * powernv_freqs for the frequency whose corresponding
+ * pstate id is @pstate.
+ *
+ * If no frequency corresponding to @pstate is found,
+ * this will return the index of the nominal
+ * frequency.
+ */
+static unsigned int pstate_to_idx(u8 pstate)
{
- int min = powernv_freqs[powernv_pstate_info.min].driver_data;
- int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+ unsigned int key = pstate % POWERNV_MAX_PSTATES;
+ struct pstate_idx_revmap_data *revmap_data;
- if (min > 0) {
- if (unlikely((pstate < max) || (pstate > min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
- } else {
- if (unlikely((pstate > max) || (pstate < min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
+ hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
+ if (revmap_data->pstate_id == pstate)
+ return revmap_data->cpufreq_table_idx;
}
- /*
- * abs() is deliberately used so that is works with
- * both monotonically increasing and decreasing
- * pstate values
- */
- return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+
+ pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
+ return powernv_pstate_info.nominal;
}
static inline void reset_gpstates(struct cpufreq_policy *policy)
@@ -247,7 +285,7 @@ static int init_powernv_pstates(void)
powernv_pstate_info.wof_enabled = true;
next:
- pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+ pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
@@ -278,19 +316,30 @@ next:
powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
+
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
u32 freq = be32_to_cpu(pstate_freqs[i]);
+ struct pstate_idx_revmap_data *revmap_data;
+ unsigned int key;
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
- powernv_freqs[i].driver_data = id;
+ powernv_freqs[i].driver_data = id & 0xFF;
+
+ revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+ if (!revmap_data)
+ continue; /* lookups fall back to the nominal index */
+
+ revmap_data->pstate_id = id & 0xFF;
+ revmap_data->cpufreq_table_idx = i;
+ key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES;
+ hash_add(pstate_revmap, &revmap_data->hentry, key);
if (id == pstate_max)
powernv_pstate_info.max = i;
- else if (id == pstate_nominal)
+ if (id == pstate_nominal)
powernv_pstate_info.nominal = i;
- else if (id == pstate_min)
+ if (id == pstate_min)
powernv_pstate_info.min = i;
if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
@@ -307,14 +356,13 @@ next:
}
/* Returns the CPU frequency corresponding to the pstate_id. */
-static unsigned int pstate_id_to_freq(int pstate_id)
+static unsigned int pstate_id_to_freq(u8 pstate_id)
{
int i;
i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
- pr_warn("PState id %d outside of PState table, "
- "reporting nominal id %d instead\n",
+ pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
i = powernv_pstate_info.nominal;
}
@@ -420,8 +468,8 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
*/
struct powernv_smp_call_data {
unsigned int freq;
- int pstate_id;
- int gpstate_id;
+ u8 pstate_id;
+ u8 gpstate_id;
};
/*
@@ -438,22 +486,15 @@ struct powernv_smp_call_data {
static void powernv_read_cpu_freq(void *arg)
{
unsigned long pmspr_val;
- s8 local_pstate_id;
struct powernv_smp_call_data *freq_data = arg;
pmspr_val = get_pmspr(SPRN_PMSR);
-
- /*
- * The local pstate id corresponds bits 48..55 in the PMSR.
- * Note: Watch out for the sign!
- */
- local_pstate_id = (pmspr_val >> 48) & 0xFF;
- freq_data->pstate_id = local_pstate_id;
+ freq_data->pstate_id = extract_local_pstate(pmspr_val);
freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
- pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
- raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
- freq_data->freq);
+ pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
}
/*
@@ -515,21 +556,21 @@ static void powernv_cpufreq_throttle_check(void *data)
struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax;
+ u8 pmsr_pmax;
unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
- pmsr_pmax = (s8)PMSR_MAX(pmsr);
+ pmsr_pmax = extract_max_pstate(pmsr);
pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
- pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
+ pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
cpu, chip->id, pmsr_pmax,
idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
@@ -645,8 +686,8 @@ void gpstate_timer_handler(struct timer_list *t)
* value. Hence, read from PMCR to get correct data.
*/
val = get_pmspr(SPRN_PMCR);
- freq_data.gpstate_id = (s8)GET_GPSTATE(val);
- freq_data.pstate_id = (s8)GET_LPSTATE(val);
+ freq_data.gpstate_id = extract_global_pstate(val);
+ freq_data.pstate_id = extract_local_pstate(val);
if (freq_data.gpstate_id == freq_data.pstate_id) {
reset_gpstates(policy);
spin_unlock(&gpstates->gpstate_lock);
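A self-contained restatement of the reverse map added above (hedged sketch; all names hypothetical). The key is the 8-bit pstate id folded into the 256-bucket table, so collisions are resolved by walking the bucket and comparing the stored id:

#include <linux/hashtable.h>
#include <linux/slab.h>

DEFINE_HASHTABLE(example_revmap, 8);	/* 2^8 = 256 buckets */

struct example_entry {
	u8 pstate_id;
	unsigned int idx;
	struct hlist_node hentry;
};

static void example_insert(u8 pstate_id, unsigned int idx)
{
	struct example_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return;

	e->pstate_id = pstate_id;
	e->idx = idx;
	hash_add(example_revmap, &e->hentry, pstate_id % 256);
}

static int example_lookup(u8 pstate_id)
{
	struct example_entry *e;

	hash_for_each_possible(example_revmap, e, hentry, pstate_id % 256)
		if (e->pstate_id == pstate_id)
			return e->idx;

	return -1;	/* caller falls back to the nominal index */
}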
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4ada55b8856e..0562761a3dec 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -275,20 +275,8 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
{
struct cpu_data *cpud = policy->driver_data;
- struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cpud->cdev = of_cpufreq_cooling_register(np, policy);
-
- if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
- pr_err("cpu%d is not running as cooling device: %ld\n",
- policy->cpu, PTR_ERR(cpud->cdev));
-
- cpud->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ cpud->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver qoriq_cpufreq_driver = {
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 05d299052c5c..247fcbfa4cb5 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,27 +18,89 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/export.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+struct scpi_data {
+ struct clk *clk;
+ struct device *cpu_dev;
+ struct thermal_cooling_device *cdev;
+};
static struct scpi_ops *scpi_ops;
-static int scpi_get_transition_latency(struct device *cpu_dev)
+static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = clk_get_rate(priv->clk);
+
+ return rate / 1000;
+}
+
+static int
+scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct scpi_data *priv = policy->driver_data;
+ u64 rate = policy->freq_table[index].frequency * 1000;
+ int ret;
+
+ ret = clk_set_rate(priv->clk, rate);
+ if (!ret && (clk_get_rate(priv->clk) != rate))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int
+scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return scpi_ops->get_transition_latency(cpu_dev);
+ int cpu, domain, tdomain;
+ struct device *tcpu_dev;
+
+ domain = scpi_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev)
+ continue;
+
+ tdomain = scpi_ops->device_domain_id(tcpu_dev);
+ if (tdomain == domain)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return 0;
}
-static int scpi_init_opp_table(const struct cpumask *cpumask)
+static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ unsigned int latency;
+ struct device *cpu_dev;
+ struct scpi_data *priv;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
ret = scpi_ops->add_opps_to_device(cpu_dev);
if (ret) {
@@ -46,32 +108,133 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
- if (ret)
+ ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
+ __func__, cpu_dev->id);
+ ret = PTR_ERR(priv->clk);
+ goto out_free_cpufreq_table;
+ }
+
+ policy->driver_data = priv;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+ ret);
+ goto out_put_clk;
+ }
+
+ /* scpi allows DVFS request for any domain from any CPU */
+ policy->dvfs_possible_from_any_cpu = true;
+
+ latency = scpi_ops->get_transition_latency(cpu_dev);
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+
+ policy->fast_switch_possible = false;
+ return 0;
+
+out_put_clk:
+ clk_put(priv->clk);
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_cpumask_remove_table(policy->cpus);
+
return ret;
}
-static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
- .name = "scpi",
- .get_transition_latency = scpi_get_transition_latency,
- .init_opp_table = scpi_init_opp_table,
- .free_opp_table = dev_pm_opp_cpumask_remove_table,
+static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+
+ cpufreq_cooling_unregister(priv->cdev);
+ clk_put(priv->clk);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ kfree(priv);
+ dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+
+ return 0;
+}
+
+static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+ struct thermal_cooling_device *cdev;
+
+ cdev = of_cpufreq_cooling_register(policy);
+ if (!IS_ERR(cdev))
+ priv->cdev = cdev;
+}
+
+static struct cpufreq_driver scpi_cpufreq_driver = {
+ .name = "scpi-cpufreq",
+ .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .get = scpi_cpufreq_get_rate,
+ .init = scpi_cpufreq_init,
+ .exit = scpi_cpufreq_exit,
+ .ready = scpi_cpufreq_ready,
+ .target_index = scpi_cpufreq_set_target,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
{
+ int ret;
+
scpi_ops = get_scpi_ops();
if (!scpi_ops)
return -EIO;
- return bL_cpufreq_register(&scpi_cpufreq_ops);
+ ret = cpufreq_register_driver(&scpi_cpufreq_driver);
+ if (ret)
+ dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ __func__, ret);
+ return ret;
}
static int scpi_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&scpi_cpufreq_ops);
+ cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
return 0;
}
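The ->target_index() contract above deserves a note: SCPI firmware may clamp or silently ignore a rate request, so a clk_set_rate() that returns success is re-read and a mismatch is reported as -EIO. As a hedged fragment:

	ret = clk_set_rate(priv->clk, rate);
	if (!ret && clk_get_rate(priv->clk) != rate)
		ret = -EIO;	/* firmware accepted but did not apply the rate */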
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 923317f03b4b..a099b7bf74cd 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -50,6 +51,7 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_mask;
unsigned long efuse_shift;
unsigned long rev_offset;
+ bool multi_regulator;
};
struct ti_cpufreq_data {
@@ -57,6 +59,7 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
+ struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -95,6 +98,7 @@ static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_offset = 0x07fc,
.efuse_mask = 0x1fff,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data am4x_soc_data = {
@@ -103,6 +107,7 @@ static struct ti_cpufreq_soc_data am4x_soc_data = {
.efuse_offset = 0x0610,
.efuse_mask = 0x3f,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data dra7_soc_data = {
@@ -111,6 +116,7 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.efuse_mask = 0xf80000,
.efuse_shift = 19,
.rev_offset = 0x204,
+ .multi_regulator = true,
};
/**
@@ -195,12 +201,14 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{},
};
-static int ti_cpufreq_init(void)
+static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
struct device_node *np;
const struct of_device_id *match;
+ struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
+ const char * const reg_names[] = {"vdd", "vbb"};
int ret;
np = of_find_node_by_path("/");
@@ -247,16 +255,29 @@ static int ti_cpufreq_init(void)
if (ret)
goto fail_put_node;
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT));
- if (ret) {
+ ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+ version, VERSION_COUNT);
+ if (IS_ERR(ti_opp_table)) {
dev_err(opp_data->cpu_dev,
"Failed to set supported hardware\n");
+ ret = PTR_ERR(ti_opp_table);
goto fail_put_node;
}
- of_node_put(opp_data->opp_node);
+ opp_data->opp_table = ti_opp_table;
+
+ if (opp_data->soc_data->multi_regulator) {
+ ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
+ reg_names,
+ ARRAY_SIZE(reg_names));
+ if (IS_ERR(ti_opp_table)) {
+ dev_pm_opp_put_supported_hw(opp_data->opp_table);
+ ret = PTR_ERR(ti_opp_table);
+ goto fail_put_node;
+ }
+ }
+ of_node_put(opp_data->opp_node);
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
@@ -269,4 +290,22 @@ free_opp_data:
return ret;
}
-device_initcall(ti_cpufreq_init);
+
+static int ti_cpufreq_init(void)
+{
+ platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ti_cpufreq_init);
+
+static struct platform_driver ti_cpufreq_driver = {
+ .probe = ti_cpufreq_probe,
+ .driver = {
+ .name = "ti-cpufreq",
+ },
+};
+/* Not module_platform_driver(): module_init() is already defined above. */
+builtin_platform_driver(ti_cpufreq_driver);
+
+MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 4e78263e34a4..5d359aff3cc5 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -36,14 +36,15 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
/**
* cpuidle_switch_governor - changes the governor
* @gov: the new target governor
- *
- * NOTE: "gov" can be NULL to specify disabled
* Must be called with cpuidle_lock acquired.
*/
int cpuidle_switch_governor(struct cpuidle_governor *gov)
{
struct cpuidle_device *dev;
+ if (!gov)
+ return -EINVAL;
+
if (gov == cpuidle_curr_governor)
return 0;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47ec920d5b71..4b741b83e23f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -723,7 +723,6 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- select CRYPTO_SHA384
select CRYPTO_SHA512
help
Enables the driver for the on-chip crypto accelerator
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index eeaf27859d80..ea83d0bff0e9 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
return true;
- /* CCM - fix CBC MAC mismatch in special case */
- if (is_ccm && decrypt && !req->assoclen)
- return true;
-
return false;
}
@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index c44954e274bc..76f459ad2821 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
@@ -275,14 +282,12 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
- memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
return 0;
}
@@ -570,15 +575,14 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct aead_request *aead_req;
- struct crypto4xx_ctx *ctx;
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
int err = 0;
- aead_req = container_of(pd_uinfo->async_req, struct aead_request,
- base);
- ctx = crypto_tfm_ctx(aead_req->base.tfm);
-
if (pd_uinfo->using_sd) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
@@ -590,38 +594,39 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
- size_t cp_len = crypto_aead_authsize(
- crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
-
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
cp_len);
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd->pd_ctl.bf.status & 0xff) {
- if (pd->pd_ctl.bf.status & 0x1) {
- /* authentication error */
- err = -EBADMSG;
- } else {
- if (!__ratelimit(&dev->aead_ratelimit)) {
- if (pd->pd_ctl.bf.status & 2)
- pr_err("pad fail error\n");
- if (pd->pd_ctl.bf.status & 4)
- pr_err("seqnum fail\n");
- if (pd->pd_ctl.bf.status & 8)
- pr_err("error _notify\n");
- pr_err("aead return err status = 0x%02x\n",
- pd->pd_ctl.bf.status & 0xff);
- pr_err("pd pad_ctl = 0x%08x\n",
- pd->pd_ctl.bf.pd_pad_ctl);
- }
- err = -EINVAL;
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
}
+ err = -EINVAL;
}
if (pd_uinfo->state & PD_ENTRY_BUSY)
@@ -1070,21 +1075,29 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
/**
* Top half of the ISR.
*/
-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (!core_dev->dev->ce_base)
- return 0;
-
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1266,6 +1279,8 @@ static int crypto4xx_probe(struct platform_device *ofdev)
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ u32 pvr;
+ bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
@@ -1282,6 +1297,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1304,7 +1320,22 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (!core_dev->dev)
goto err_alloc_dev;
+ /*
+ * Older versions of the 460EX/GT have a hardware bug and hence
+ * do not support H/W-based security interrupt coalescing.
+ */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
@@ -1325,13 +1356,6 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- /* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
- if (rc)
- goto err_request_irq;
-
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
@@ -1339,6 +1363,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
goto err_iomap;
}
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
+ KBUILD_MODNAME, dev);
+ if (rc)
+ goto err_request_irq;
+
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1352,11 +1385,11 @@ static int crypto4xx_probe(struct platform_device *ofdev)
return 0;
err_start_dev:
- iounmap(core_dev->dev->ce_base);
-err_iomap:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
@@ -1397,7 +1430,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
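One detail of the decrypt path added above is worth calling out: the ICV comparison uses crypto_memneq() rather than memcmp(), so the check runs in time independent of where the first mismatching byte sits and a forged tag leaks nothing through timing. The relevant fragment, for emphasis:

	if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
		err = -EBADMSG;	/* constant-time tag mismatch */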
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 8ac3bd37203b..23b726da6534 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -28,8 +28,6 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
-#define MODULE_NAME "crypto4xx"
-
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -82,7 +80,6 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
- char *name;
void __iomem *ce_base;
void __iomem *trng_base;
@@ -109,6 +106,7 @@ struct crypto4xx_device {
struct list_head alg_list; /* List of algorithm supported
by this device */
struct ratelimit_state aead_ratelimit;
+ bool is_revb;
};
struct crypto4xx_core_device {
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 0a22ec5d1a96..472331787e04 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -121,13 +121,15 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
#define PPC4XX_TRNG_EN 0x00020000
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
* all following defines are ad hoc
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17fd223..5e63742b0d22 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
if (!rng)
goto err_out;
- rng->name = MODULE_NAME;
+ rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 456278440863..0fb8bbf41a8d 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
// The HW omits the initial increment of the counter field.
- crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+ memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
@@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = {
.setkey = artpec6_crypto_aead_set_key,
.encrypt = artpec6_crypto_aead_encrypt,
.decrypt = artpec6_crypto_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -3041,9 +3042,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
variant = (enum artpec6_crypto_variant)match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
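The J0 change above follows from the GCM specification (NIST SP 800-38D): with a 96-bit IV, J0 = IV || 0x00000001, so the trailing 32-bit word is a constant and can be written directly instead of incrementing a zeroed counter field. A hedged fragment:

	u8 j0[16];

	memcpy(j0, iv, GCM_AES_IV_SIZE);	/* 12-byte IV */
	memcpy(j0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);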
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index ce70b44d0fb6..2b75f95bbe1b 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,6 @@
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
-#include <crypto/aes.h>
#include <crypto/sha3.h>
#include "util.h"
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a118b9bed669..bfbf8bf77f03 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -494,7 +494,8 @@ static struct ahash_alg algs = {
.cra_driver_name = DRIVER_NAME,
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
.cra_alignmask = 3,
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index baa8dd52472d..2188235be02d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
- false);
+ false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_enc:
/*
@@ -266,9 +271,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
- nonce, ctx1_iv_off, false);
+ nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -300,9 +305,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, false);
+ ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead,
	/* append encryption key after the auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
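
For readers following the new Era >= 6 branch above: the ctx->key buffer it assembles has the layout sketched below (an illustrative comment, not part of the patch).

/*
 * ctx->key layout on the DKP (Era >= 6) path, as built above:
 *
 *   offset 0           keylen          keylen_pad     keylen_pad+enckeylen
 *   | raw auth key ..... | padding ..... | encryption key ..... |
 *
 * The raw auth key is later expanded *in place* into the MDHA split key
 * by the DKP OPERATION in the shared descriptor, which is why this
 * buffer is synced with ctx->dir (DMA_BIDIRECTIONAL on this path)
 * rather than the old unconditional DMA_TO_DEVICE.
 */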
@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4106_set_sh_desc(aead);
}
@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4543_set_sh_desc(aead);
}
@@ -625,7 +654,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -648,9 +676,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
@@ -658,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -691,23 +718,21 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -979,9 +1004,6 @@ static void init_aead_job(struct aead_request *req,
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
-
- /* REG3 = assoclen */
- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
static void init_gcm_job(struct aead_request *req,
@@ -996,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req,
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
	/* BUG: this should not be specific to generic GCM. */
last = 0;
@@ -1022,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -1045,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
+ /*
+	 * Set assoclen in {REG3, DPOVRD}, depending on whether the MATH
+	 * command supports DPOVRD as a destination.
+ */
+ if (ctrlpriv->era < 3)
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ else
+ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
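
The REG3-vs-DPOVRD choice just added here recurs in the shared-descriptor constructors later in this patch. A minimal sketch of the rule, using the same append_math_add_imm_u32() call as the hunk above (the wrapper helper itself is hypothetical, not part of the patch):

static void append_assoclen(u32 *desc, int era, u32 assoclen)
{
	if (era < 3)
		/* MATH cannot use DPOVRD as destination before Era 3 */
		append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
	else
		/* Era >= 3: pass assoclen via DPOVRD, freeing up REG3 */
		append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, assoclen);
}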
@@ -3228,9 +3261,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3238,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -3269,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3279,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
offsetof(struct caam_ctx, sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
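
Both caam_init_common() variants in this patch apply the same mapping-direction rule. A sketch of that rule as a standalone helper (a hypothetical refactoring; the behavior is taken from the hunks above):

static enum dma_data_direction caam_ctx_dir(struct caam_drv_private *priv,
					    bool uses_dkp)
{
	/*
	 * With DKP the device writes the derived split key back into
	 * ctx->key, so the key/descriptor mapping must allow traffic in
	 * both directions; otherwise data only ever flows to the device.
	 */
	if (priv->era >= 6 && uses_dkp)
		return DMA_BIDIRECTIONAL;
	return DMA_TO_DEVICE;
}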
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..ceb93fbb76e6 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type)
* cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
* (non-protocol) with no (null) encryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
- KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* assoclen + cryptlen = seqinlen */
@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
* cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
* (non-protocol) with no (null) decryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* Class 2 operation */
@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
static void init_sh_desc_key_aead(u32 * const desc,
struct alginfo * const cdata,
struct alginfo * const adata,
- const bool is_rfc3686, u32 *nonce)
+ const bool is_rfc3686, u32 *nonce, int era)
{
u32 *key_jump_cmd;
unsigned int enckeylen = cdata->keylen;
@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc,
if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE;
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, enckeylen,
@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc,
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+ int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi)
+ const u32 ctx1_iv_off, const bool is_qi, int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
- else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+ CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+ CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi)
+ const bool is_qi, int era)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
if (is_qi) {
u32 *wait_load_cmd;
@@ -528,8 +561,13 @@ copy_iv:
OP_ALG_ENCRYPT);
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -1075,7 +1113,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1140,7 +1178,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1209,7 +1247,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index e412ec8f7005..5f9445ae2114 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi);
+ const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index f9f08fce4356..4aecc9435f69 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_enc:
/* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv,
- is_rfc3686, nonce, ctx1_iv_off, true);
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ ctrlpriv->era);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_givenc:
return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
	/* append encryption key after the auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
@@ -272,7 +298,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
int ret = 0;
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -295,9 +320,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
@@ -356,10 +380,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts ablkcipher encrypt, decrypt shared descriptors */
@@ -668,7 +690,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -905,7 +927,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -1058,7 +1080,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, GIVENCRYPT, 0, 0);
@@ -2123,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
struct caam_drv_private *priv;
@@ -2137,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
@@ -2149,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock);
@@ -2167,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2177,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -2186,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
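
One small fix worth flagging in the caamalg_qi.c hunks above: the S/G-overflow messages now print the limit with %zu. A hedged illustration (the definition of CAAM_QI_MAX_AEAD_SG is not shown in this diff; the point is the size_t format specifier):

/*
 * CAAM_QI_MAX_AEAD_SG evaluates to a size_t (presumably via sizeof()
 * arithmetic), so %zu is the portable printf specifier; %lu warns or
 * misprints on platforms where size_t is not unsigned long.
 */
dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
	qm_sg_ents, CAAM_QI_MAX_AEAD_SG);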
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 616720a04e7a..0beb28196e20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
* read and write to seqout
*/
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx)
+ struct caam_hash_ctx *ctx, bool import_ctx,
+ int era)
{
u32 op = ctx->adata.algtype;
u32 *skip_key_load;
@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6)
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load);
@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
- CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.key_inline = true;
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad, 1);
-#endif
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ memcpy(ctx->key, key, keylen);
+ } else {
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+ keylen, CAAM_MAX_HASH_KEY_SIZE);
+ if (ret)
+ goto bad_free_key;
+ }
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
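
To summarize the two key-setup paths ahash_setkey() now has (a comment-only sketch mirroring the hunk above):

/*
 * Era < 6 : gen_split_key() runs a one-off CAAM job at setkey time;
 *           ctx->key ends up holding the derived MDHA split key.
 * Era >= 6: ctx->key keeps the raw HMAC key and is inlined into the
 *           shared descriptor (adata.key_inline = true); the DKP
 *           OPERATION derives the split key each time the descriptor
 *           runs, so setkey only records keylen and keylen_pad.
 */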
@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..75d280cb2dc0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
+ ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+ ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present,
caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 8142de7ba050..f76ff160a02c 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
@@ -1093,6 +1105,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
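
Putting the new DKP defines together: an illustrative composition of the 32-bit OPERATION command word for an immediate-source, immediate-destination SHA-256 key derivation (keylen here is a placeholder variable, not a define from this patch):

u32 op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_DKP_SHA256 |
	 OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
	 (keylen & OP_PCL_DKP_KEY_MASK);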
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index ba1ca0806f0a..d4256fa4a1d6 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 * const desc, void *data, int len)
+static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 * const desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -452,7 +452,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
- void *key_virt;
+ const void *key_virt;
};
bool key_inline;
};
@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
+/**
+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * keylen should be the length of initial key, while keylen_pad
+ * the length of the derived (split) key.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+ u32 protid;
+
+ /*
+ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+ * to OP_PCLID_DKP_{MD5, SHA*}
+ */
+ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+ (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+ if (adata->key_inline) {
+ int words;
+
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ /* Reserve space in descriptor buffer for the derived key */
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+ if (words)
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+ adata->keylen);
+ append_ptr(desc, adata->key_dma);
+ }
+}
+
#endif /* DESC_CONSTR_H */
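
A worked example of the descriptor-space reservation in append_proto_dkp() above, assuming CAAM_CMD_SZ == sizeof(u32): for an inline 20-byte HMAC-SHA256 key, split_key_len() gives keylen_pad = 64, so

/*
 *   words = (ALIGN(64, 4) - ALIGN(20, 4)) / 4
 *         = (64 - 20) / 4
 *         = 11
 *
 * i.e. 11 extra command words are left in the descriptor buffer so the
 * in-place derived (split) key cannot overwrite the commands that
 * follow the immediate key data.
 */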
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 91f1107276e5..7696a774a362 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -84,6 +84,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8c79c3a153dc..312b5f042f31 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -11,36 +11,6 @@
#include "desc_constr.h"
#include "key_gen.h"
-/**
- * split_key_len - Compute MDHA split key length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key length
- */
-static inline u32 split_key_len(u32 hash)
-{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- u32 idx;
-
- idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
-
- return (u32)(mdpadlen[idx] * 2);
-}
-
-/**
- * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key pad length
- */
-static inline u32 split_key_pad_len(u32 hash)
-{
- return ALIGN(split_key_len(hash), 16);
-}
-
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index 5db055c25bd2..818f78f6fc1a 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -6,6 +6,36 @@
*
*/
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
struct split_key_result {
struct completion completion;
int err;
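
For orientation, the values these helpers produce (derived directly from the mdpadlen table above; illustrative only):

/*
 *   HMAC-MD5   : split_key_len() = 32,  split_key_pad_len() = 32
 *   HMAC-SHA1  : split_key_len() = 40,  split_key_pad_len() = 48
 *   HMAC-SHA256: split_key_len() = 64,  split_key_pad_len() = 64
 *   HMAC-SHA512: split_key_len() = 128, split_key_pad_len() = 128
 *
 * The split key is the concatenated MDHA ipad/opad state, hence
 * "mdpadlen[idx] * 2"; padding rounds it up to a 16-byte boundary.
 */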
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 169e66231bcf..b0ba4331944b 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto request_cleanup;
}
result = (union cpt_res_s *)info->completion_addr;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4addc238a6ef..deaefd532aaa 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -6,7 +6,6 @@
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
-#include "nitrox_req.h"
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff02b713c6f6..ca1f0d780b61 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -21,7 +21,6 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/delay.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index b56b3f711d94..5ae9f8706f17 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -19,3 +19,13 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
+
+config CHELSIO_IPSEC_INLINE
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on CRYPTO_DEV_CHELSIO
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+	  Enable support for inline IPSec Tx crypto offload.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index bebdf06687ad..eaecaf1ebcf3 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4eed7171e2ae..34a02d690548 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -73,6 +73,29 @@
#define IV AES_BLOCK_SIZE
+static unsigned int sgl_ent_len[] = {
+ 0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+ 0, 32, 32, 48, 48, 64, 64, 80, 80,
+ 112, 112, 128, 128, 144, 144, 160, 160,
+ 192, 192, 208, 208, 224, 224, 240, 240,
+ 272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+ unsigned char *input, int err);
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -108,18 +131,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
return (skb->len <= SGE_MAX_WR_LEN);
}
-/*
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
-}
-
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
unsigned int entlen,
unsigned int skip)
@@ -160,7 +171,6 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
if (input == NULL)
goto out;
- reqctx = ahash_request_ctx(req);
digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
if (reqctx->is_sg_map)
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
@@ -183,30 +193,17 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
}
out:
req->base.complete(&req->base, err);
+}
- }
-
-static inline void chcr_handle_aead_resp(struct aead_request *req,
- unsigned char *input,
- int err)
+static inline int get_aead_subtype(struct crypto_aead *aead)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
-
-
- chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
- if (reqctx->b0_dma)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
- reqctx->b0_len, DMA_BIDIRECTIONAL);
- if (reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(req, input, &err);
- reqctx->verify = VERIFY_HW;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
- req->base.complete(&req->base, err);
-}
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -231,6 +228,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
+ req->base.complete(&req->base, err);
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
@@ -558,7 +574,8 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
skip = 0;
}
}
- if (walk->nents == 0) {
+ WARN(!sg, "SG should not be null here\n");
+ if (sg && (walk->nents == 0)) {
small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
walk->sgl->len0 = cpu_to_be32(sgmin);
@@ -595,14 +612,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_aead_subtype(struct crypto_aead *aead)
-{
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.aead);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
-}
-
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -675,7 +684,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
if (srclen <= dstlen)
break;
less = min_t(unsigned int, sg_dma_len(dst) - offset -
- dstskip, CHCR_DST_SG_SIZE);
+ dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
if (offset == sg_dma_len(dst)) {
@@ -686,7 +695,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
dstskip = 0;
}
src = sg_next(src);
- srcskip = 0;
+ srcskip = 0;
}
return min(srclen, dstlen);
}
@@ -1008,7 +1017,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+ u32 isfinal)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
@@ -1035,7 +1045,8 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_decrypt_one(cipher, iv, iv);
+ if (!isfinal)
+ crypto_cipher_decrypt_one(cipher, iv, iv);
out:
return ret;
}
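
A hedged reading of the new isfinal flag, inferred from the call sites rather than stated in the patch:

/*
 * chcr_update_cipher_iv() passes isfinal = 0: the tweak is decrypted
 * back so the next partial request can be programmed with it.
 * chcr_final_cipher_iv() passes isfinal = 1: the computed tweak is
 * returned to the caller as-is, so the trailing decrypt is skipped.
 */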
@@ -1056,7 +1067,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
AES_BLOCK_SIZE) + 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1087,7 +1098,7 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
ctr_add_iv(iv, req->info, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1101,7 +1112,6 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
@@ -1135,10 +1145,10 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
		/* CTR mode counter overflow */
bytes = req->nbytes - reqctx->processed;
@@ -1239,15 +1249,15 @@ static int process_cipher(struct ablkcipher_request *req,
MIN_CIPHER_SG,
SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
bytes = req->nbytes;
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
@@ -2014,11 +2024,8 @@ static int chcr_aead_common_init(struct aead_request *req,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int error = -EINVAL;
- unsigned int dst_size;
unsigned int authsize = crypto_aead_authsize(tfm);
- dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
@@ -2083,7 +2090,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, temp;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
@@ -2097,24 +2104,19 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
return NULL;
reqctx->b0_dma = 0;
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
assoclen = 0;
}
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
- dnents += MIN_AUTH_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
@@ -2162,16 +2164,23 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
temp & 0xF,
null ? 0 : assoclen + IV + 1,
temp, temp);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+ else
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
- CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ temp,
actx->auth_mode, aeadctx->hmac_ctrl,
IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- if (op_type == CHCR_ENCRYPT_OP)
+ if (op_type == CHCR_ENCRYPT_OP ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
memcpy(chcr_req->key_ctx.key, aeadctx->key,
aeadctx->enckey_len);
else
@@ -2181,7 +2190,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
- memcpy(reqctx->iv, req->iv, IV);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+ memcpy(reqctx->iv, req->iv, IV);
+ }
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
@@ -2202,9 +2220,9 @@ err:
return ERR_PTR(error);
}
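
Two details of the CTR authenc paths above are worth noting. AES-CTR decryption is the same keystream operation as encryption, so the CTR subtypes always load the forward key into the key context, while only the CBC subtypes need the reverse round key. And for the rfc3686 variants, the 16-byte counter block is the 4-byte nonce (taken from the tail of the key in setkey) followed by the 8-byte per-request IV and a big-endian block counter starting at 1. The layout in isolation (a sketch using the sizes assumed here):

/* RFC 3686 counter block as assembled above (a sketch) */
static void build_rfc3686_block(u8 out[16], const u8 *nonce, const u8 *iv)
{
	memcpy(out, nonce, CTR_RFC3686_NONCE_SIZE);	   /* 4 bytes */
	memcpy(out + CTR_RFC3686_NONCE_SIZE, iv,
	       CTR_RFC3686_IV_SIZE);			   /* 8 bytes */
	*(__be32 *)(out + CTR_RFC3686_NONCE_SIZE +
		    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); /* counter */
}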
-static int chcr_aead_dma_map(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
int error;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2246,9 +2264,9 @@ err:
return -ENOMEM;
}
-static void chcr_aead_dma_unmap(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2273,10 +2291,10 @@ static void chcr_aead_dma_unmap(struct device *dev,
}
}
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type)
+void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2308,11 +2326,11 @@ static inline void chcr_add_aead_src_ent(struct aead_request *req,
}
}
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid)
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2330,9 +2348,9 @@ static inline void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam)
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2355,10 +2373,10 @@ static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid)
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct dsgl_walk dsgl_walk;
@@ -2373,9 +2391,9 @@ static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param)
+void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
{
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
@@ -2395,16 +2413,13 @@ static inline void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
&reqctx->dma_addr);
ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
- 0);
-// reqctx->srcsg = ulp_walk.last_sg;
-// reqctx->src_ofst = ulp_walk.last_sg_len;
- ulptx_walk_end(&ulp_walk);
+ 0);
+ ulptx_walk_end(&ulp_walk);
}
}
-
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req)
+int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
int error = 0;
@@ -2414,13 +2429,13 @@ static inline int chcr_hash_dma_map(struct device *dev,
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
- return error;
+ return -ENOMEM;
req_ctx->is_sg_map = 1;
return 0;
}
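
The error-path change above matters: dma_map_sg() returns the number of entries it mapped and 0 on failure, so returning the raw value handed 0 — success — back to the caller when mapping failed. The corrected idiom (a sketch):

	int nents = dma_map_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);

	if (nents == 0)		/* 0 means mapping failed, not "mapped none" */
		return -ENOMEM;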
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req)
+void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2433,9 +2448,8 @@ static inline void chcr_hash_dma_unmap(struct device *dev,
}
-
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
{
int error;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2469,8 +2483,9 @@ err:
dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+
+void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2666,8 +2681,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
sub_type = get_aead_subtype(tfm);
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen -= 8;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
@@ -2677,15 +2690,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
- + (op_type ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_CCM_SG; // For IV and B0
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
@@ -2780,19 +2789,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
assoclen = req->assoclen - 8;
reqctx->b0_dma = 0;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
error = chcr_aead_common_init(req, op_type);
- if (error)
- return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst,
- req->cryptlen + (op_type ? -authsize : authsize),
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_GCM_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
@@ -2829,10 +2833,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
assoclen + IV + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
temp, temp);
- chcr_req->sec_cpl.seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -3212,7 +3216,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains both the auth and cipher keys */
struct crypto_authenc_keys keys;
- unsigned int bs;
+ unsigned int bs, subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3241,6 +3245,15 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
pr_err("chcr : Unsupported digest size\n");
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3258,9 +3271,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
*/
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
@@ -3333,6 +3349,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
struct crypto_authenc_keys keys;
int err;
/* it contains both the auth and cipher keys */
+ unsigned int subtype;
int key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3350,6 +3367,15 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3357,13 +3383,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
key_ctx_len = sizeof(struct _key_ctx)
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
@@ -3375,6 +3404,40 @@ out:
aeadctx->enckey_len = 0;
return -EINVAL;
}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx;
+ struct sk_buff *skb;
+
+ if (!a_ctx(tfm)->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ u_ctx = ULD_CTX(a_ctx(tfm));
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ a_ctx(tfm)->tx_qidx)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
+ op_type);
+
+ if (IS_ERR(skb) || !skb)
+ return PTR_ERR(skb);
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -3383,8 +3446,10 @@ static int chcr_aead_encrypt(struct aead_request *req)
reqctx->verify = VERIFY_HW;
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3413,8 +3478,10 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3427,38 +3494,6 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
}
-static int chcr_aead_op(struct aead_request *req,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx;
- struct sk_buff *skb;
-
- if (!a_ctx(tfm)->dev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
- return -ENXIO;
- }
- u_ctx = ULD_CTX(a_ctx(tfm));
- if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
- }
-
- /* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
- op_type);
-
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
-
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return -EINPROGRESS;
-}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -3742,7 +3777,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3763,7 +3798,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3785,7 +3820,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3805,7 +3840,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3826,7 +3861,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3847,7 +3882,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3867,6 +3902,133 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-digest_null-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+
};
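
Once registered, the new templates are reachable through the regular AEAD API; a hedged caller-side sketch (keyblob and keyblob_len are hypothetical — an RTA-encoded authenc key whose cipher part ends with the 4-byte RFC 3686 nonce, as the setkey handlers above expect):

	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),rfc3686(ctr(aes)))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
	/* the enc key inside the blob carries the nonce in its last 4 bytes */
	crypto_aead_setkey(tfm, keyblob, keyblob_len);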
/*
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 96c9335ee728..d1673a5d4bf5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -226,15 +226,6 @@
#define SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
- 96, 112, 120, 136, 144, 160, 168, 184,
- 192, 208, 216, 232, 240, 256, 264, 280,
- 288, 304, 312, 328, 336, 352, 360, 376};
-unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
- 112, 112, 128, 128, 144, 144, 160, 160,
- 192, 192, 208, 208, 224, 224, 240, 240,
- 272, 272, 288, 288, 304, 304, 320, 320};
-
struct algo_param {
unsigned int auth_mode;
unsigned int mk_size;
@@ -404,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w)
return *(u32 *)(&bytes[0]);
}
-static u32 round_constant[11] = {
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000, 0x6C000000
-};
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f5a2624081dc..04f277cade7c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ .tx_handler = chcr_uld_tx_handler,
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
struct uld_ctx *assign_chcr_device(void)
@@ -164,6 +167,10 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(lld);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
return u_ctx;
}
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+{
+ return chcr_ipsec_xmit(skb, dev);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 94e7412f6164..3c29ee09b8b5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -39,6 +39,7 @@
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
+#include "t4_msg.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
@@ -89,12 +90,49 @@ struct uld_ctx {
struct chcr_dev *dev;
};
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
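
Here a flit is one 8-byte beat of the work request. Worked values of the formula, as a quick check: sgl_len(1) == 2, sgl_len(2) == 4, sgl_len(3) == 5, sgl_len(4) == 7 — two flits for the header plus first entry, then roughly three flits for each further pair of entries.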
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 94a87e3ad9bc..7daf0a17a7d2 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -134,14 +134,16 @@
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000
#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000
#define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000
#define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
@@ -210,8 +212,6 @@ struct dsgl_walk {
struct phys_sge_pairs *to;
};
-
-
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -227,21 +227,18 @@ struct __aead_ctx {
struct chcr_authenc_ctx authenc[0];
};
-
-
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
u16 hmac_ctrl;
u16 mayverify;
struct __aead_ctx ctx[0];
};
-
-
struct hmac_ctx {
struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -307,44 +304,29 @@ typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
int size,
unsigned short op_type);
-static int chcr_aead_op(struct aead_request *req_base,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn);
-static inline int get_aead_subtype(struct crypto_aead *aead);
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err);
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
-static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
- unsigned short op_type);
-static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
- *req, unsigned short op_type);
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid);
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type);
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam);
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req);
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req);
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid);
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen, unsigned short op_type,
+ unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+ unsigned int assoclen, unsigned short op_type);
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param);
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req);
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
new file mode 100644
index 000000000000..db1e241104ed
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -0,0 +1,654 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE 8
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+};
+
+/* Add xfrm offload ops to the Chelsio interfaces */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+ struct net_device *netdev = NULL;
+ int i;
+
+ for (i = 0; i < lld->nports; i++) {
+ netdev = lld->ports[i];
+ if (!netdev)
+ continue;
+ netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ rtnl_lock();
+ netdev_change_features(netdev);
+ rtnl_unlock();
+ }
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int hmac_ctrl;
+ int authsize = x->aead->alg_icv_len / 8;
+
+ sa_entry->authsize = authsize;
+
+ switch (authsize) {
+ case ICV_8:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ break;
+ case ICV_12:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ break;
+ case ICV_16:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return hmac_ctrl;
+}
+
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct crypto_cipher *cipher;
+ int keylen = (x->aead->alg_key_len + 7) / 8;
+ unsigned char *key = x->aead->alg_key;
+ int ck_size, key_ctx_size = 0;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ int ret = 0;
+
+ if (keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(sa_entry->salt, key + keylen, 4);
+ }
+
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(sa_entry->key, key, keylen);
+ sa_entry->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+
+ sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It will go in the key context.
+ */
+ cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+ if (IS_ERR(cipher)) {
+ sa_entry->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret) {
+ sa_entry->enckey_len = 0;
+ goto out1;
+ }
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+ memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+ 16), ghash_h, AEAD_H_SIZE);
+ sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
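
The subkey computed above is the standard GCM hash key H = AES_K(0^128), stored after the 16-byte-aligned cipher key so the hardware key context carries both. The derivation in isolation (a sketch with the same kernel cipher API used above):

static int derive_gcm_hash_subkey(const u8 *key, unsigned int keylen,
				  u8 hash_subkey[AEAD_H_SIZE])
{
	struct crypto_cipher *aes;
	int ret;

	aes = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);
	ret = crypto_cipher_setkey(aes, key, keylen);
	if (!ret) {
		memset(hash_subkey, 0, AEAD_H_SIZE);
		/* H = E_K(0^128) */
		crypto_cipher_encrypt_one(aes, hash_subkey, hash_subkey);
	}
	crypto_free_cipher(aes);
	return ret;
}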
+
+/*
+ * chcr_xfrm_add_state
+ * returns 0 on success, a negative error if sending the message to the FPGA
+ * failed, and a positive error if the FPGA returned a bad response
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+ int res = 0;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128 &&
+ x->aead->alg_icv_len != 96) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+out:
+ return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kfree(sa_entry);
+ module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+
+ return true;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+{
+ int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+}
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ unsigned int kctx_len)
+{
+ unsigned int flits;
+ int hdrlen = is_eth_imm(skb, kctx_len);
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+ /* Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ struct cpl_tx_pkt_core *cpl;
+ u64 cntrl = 0;
+ u32 ctrl0, qidx;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cpl = (struct cpl_tx_pkt_core *)pos;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+ if (skb_vlan_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(ctrl0);
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
+ return pos;
+}
+
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ unsigned int len, qidx;
+ struct _key_ctx *key_ctx;
+ int left, eoq, key_len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+ key_len = sa_entry->kctx_len;
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ left = eoq;
+ if (!eoq) {
+ pos = q->q.desc;
+ left = 64 * q->q.size;
+ }
+
+ /* Copy the Key context header */
+ key_ctx = (struct _key_ctx *)pos;
+ key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+ memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+ pos += sizeof(struct _key_ctx);
+ left -= sizeof(struct _key_ctx);
+
+ if (likely(len <= left)) {
+ memcpy(key_ctx->key, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ if (key_len <= left) {
+ memcpy(pos, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ }
+ /* Copy CPL TX PKT XT */
+ pos = copy_cpltx_pktxt(skb, dev, pos);
+
+ return pos;
+}
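
Because pos can sit anywhere in the descriptor ring, the key copy above splits at q->q.stat and continues from q->q.desc. The wrap pattern in the abstract (a sketch; the helper name is hypothetical):

static void *ring_wrap_copy(void *pos, void *ring_start, int left,
			    const void *src, int len)
{
	if (len <= left) {
		memcpy(pos, src, len);
		return (u8 *)pos + len;
	}
	memcpy(pos, src, left);
	memcpy(ring_start, (const u8 *)src + left, len - left);
	return (u8 *)ring_start + (len - left);
}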
+
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ unsigned int immdatalen = 0;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
+ unsigned int flits;
+ u32 wr_mid;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+ int qid = q->q.cntxt_id;
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+ flits = calc_tx_sec_flits(skb, kctx_len);
+
+ if (is_eth_imm(skb, kctx_len))
+ immdatalen = skb->len;
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+ wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+
+ /* Sub-command */
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ immdatalen);
+
+ /* CPL_SEC_PDU */
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1)));
+
+ wr->req.sec_cpl.pldlen = htonl(skb->len);
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ (skb_transport_offset(skb) + 1),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr)),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1), 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1,
+ sa_entry->authsize,
+ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata) +
+ sizeof(struct cpl_tx_sec_pdu);
+
+ pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
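
Each Tx descriptor holds 8 flits (64 bytes), hence the round-up: flits_to_desc(8) == 1, flits_to_desc(9) == 2, flits_to_desc(19) == 3.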
+
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/*
+ * chcr_ipsec_xmit is called from the ULD Tx handler
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ int qidx, left, credits;
+ unsigned int flits = 0, ndesc, kctx_len;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ struct port_info *pi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
+
+ if (!x->xso.offload_handle)
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kctx_len = sa_entry->kctx_len;
+
+ if (skb->sp->len != 1) {
+out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+ dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+ flits);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (is_eth_imm(skb, kctx_len))
+ immediate = true;
+
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ /* Setup IPSec CPL */
+ pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ if (pos == (u64 *)q->q.stat) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ pos = (void *)q->q.desc;
+ }
+
+ sgl = (void *)pos;
+ if (immediate) {
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
+ dev_consume_skb_any(skb);
+ } else {
+ int last_desc;
+
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+ 0, addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ }
+ txq_advance(&q->q, ndesc);
+
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 451620b475a0..86f5f459762e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* exynos-rng.c - Random Number Generator driver for the Exynos
*
@@ -6,15 +7,6 @@
* Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -22,12 +14,18 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
#define EXYNOS_RNG_CONTROL 0x0
#define EXYNOS_RNG_STATUS 0x10
+
+#define EXYNOS_RNG_SEED_CONF 0x14
+#define EXYNOS_RNG_GEN_PRNG BIT(1)
+
#define EXYNOS_RNG_SEED_BASE 0x140
#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
#define EXYNOS_RNG_OUT_BASE 0x160
@@ -43,13 +41,21 @@
#define EXYNOS_RNG_SEED_REGS 5
#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
+enum exynos_prng_type {
+ EXYNOS_PRNG_UNKNOWN = 0,
+ EXYNOS_PRNG_EXYNOS4,
+ EXYNOS_PRNG_EXYNOS5,
+};
+
/*
- * Driver re-seeds itself with generated random numbers to increase
- * the randomness.
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
*
* Time for next re-seed in ms.
*/
-#define EXYNOS_RNG_RESEED_TIME 100
+#define EXYNOS_RNG_RESEED_TIME 1000
+#define EXYNOS_RNG_RESEED_BYTES 65536
+
/*
* In polling mode, do not wait infinitely for the engine to finish the work.
*/
@@ -63,13 +69,17 @@ struct exynos_rng_ctx {
/* Device associated memory */
struct exynos_rng_dev {
struct device *dev;
+ enum exynos_prng_type type;
void __iomem *mem;
struct clk *clk;
+ struct mutex lock;
/* Generated numbers stored for seeding during resume */
u8 seed_save[EXYNOS_RNG_SEED_SIZE];
unsigned int seed_save_len;
/* Time of last seeding in jiffies */
unsigned long last_seeding;
+ /* Bytes generated since last seeding */
+ unsigned long bytes_seeding;
};
static struct exynos_rng_dev *exynos_rng_dev;
@@ -114,39 +124,12 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
}
rng->last_seeding = jiffies;
+ rng->bytes_seeding = 0;
return 0;
}
/*
- * Read from output registers and put the data under 'dst' array,
- * up to dlen bytes.
- *
- * Returns number of bytes actually stored in 'dst' (dlen
- * or EXYNOS_RNG_SEED_SIZE).
- */
-static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
- u8 *dst, unsigned int dlen)
-{
- unsigned int cnt = 0;
- int i, j;
- u32 val;
-
- for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
- val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
-
- for (i = 0; i < 4; i++) {
- dst[cnt] = val & 0xff;
- val >>= 8;
- if (++cnt >= dlen)
- return cnt;
- }
- }
-
- return cnt;
-}
-
-/*
* Start the engine and poll for finish. Then read from output registers
* filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
* random data (EXYNOS_RNG_SEED_SIZE).
@@ -160,8 +143,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
{
int retry = EXYNOS_RNG_WAIT_RETRIES;
- exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
- EXYNOS_RNG_CONTROL);
+ if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+ exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+ } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+ exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+ EXYNOS_RNG_SEED_CONF);
+ }
while (!(exynos_rng_readl(rng,
EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
@@ -173,7 +161,9 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
/* Clear status bit */
exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
EXYNOS_RNG_STATUS);
- *read = exynos_rng_copy_random(rng, dst, dlen);
+ *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+ memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+ rng->bytes_seeding += *read;
return 0;
}
@@ -187,13 +177,18 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
unsigned int read = 0;
u8 seed[EXYNOS_RNG_SEED_SIZE];
- if (time_before(now, next_seeding))
+ if (time_before(now, next_seeding) &&
+ rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
return;
if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
return;
exynos_rng_set_seed(rng, seed, read);
+
+ /* Briefly drop the lock so other users can make progress. */
+ mutex_unlock(&rng->lock);
+ mutex_lock(&rng->lock);
}
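
With the byte counter added above, a reseed fires on whichever limit is hit first: 1000 ms since the last seeding or 64 KiB of generated output. The trigger condition in isolation (a sketch against the fields introduced in this patch):

static bool exynos_rng_needs_reseed(const struct exynos_rng_dev *rng)
{
	return time_after_eq(jiffies, rng->last_seeding +
			     msecs_to_jiffies(EXYNOS_RNG_RESEED_TIME)) ||
	       rng->bytes_seeding >= EXYNOS_RNG_RESEED_BYTES;
}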
static int exynos_rng_generate(struct crypto_rng *tfm,
@@ -209,6 +204,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
do {
ret = exynos_rng_get_random(rng, dst, dlen, &read);
if (ret)
@@ -219,6 +215,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
exynos_rng_reseed(rng);
} while (dlen > 0);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -236,7 +233,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -259,7 +258,7 @@ static struct rng_alg exynos_rng_alg = {
.base = {
.cra_name = "stdrng",
.cra_driver_name = "exynos_rng",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_ctxsize = sizeof(struct exynos_rng_ctx),
.cra_module = THIS_MODULE,
.cra_init = exynos_rng_kcapi_init,
@@ -279,6 +278,10 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
+ rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+
+ mutex_init(&rng->lock);
+
rng->dev = &pdev->dev;
rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -329,9 +332,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
/* Get new random numbers and store them for seeding on resume. */
exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
&(rng->seed_save_len));
+
+ mutex_unlock(&rng->lock);
+
dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
rng->seed_save_len);
@@ -354,8 +362,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+ mutex_unlock(&rng->lock);
+
clk_disable_unprepare(rng->clk);
return ret;
@@ -367,6 +379,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS4,
+ }, {
+ .compatible = "samsung,exynos5250-prng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS5,
},
{ },
};
@@ -386,4 +402,4 @@ module_platform_driver(exynos_rng_driver);
MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e09d4055b19e..a5a36fe7bf2c 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2579,6 +2579,7 @@ err_out_unmap_bars:
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
+ kfree(dev);
err_out_free_regions:
pci_release_regions(pdev);
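The hifn_795x hunk plugs a probe-time leak: the device structure allocated earlier is now freed on the error path, after its BARs are unmapped. The usual kernel idiom is a goto ladder that undoes each step in reverse order; a generic sketch (all identifiers hypothetical):

    static int example_probe(struct pci_dev *pdev)
    {
        struct example_dev *dev;
        int err;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
            return -ENOMEM;

        err = pci_request_regions(pdev, "example");
        if (err)
            goto err_free_dev;

        return 0;

    err_free_dev:
        kfree(dev);        /* undo the allocation above */
        return err;
    }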
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4bcef78a08aa..225e74a7f724 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
- priv->base + ctrl);
+ EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Release engine from reset */
- val = readl(priv->base + ctrl);
+ val = readl(EIP197_PE(priv) + ctrl);
val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
- writel(val, priv->base + ctrl);
+ writel(val, EIP197_PE(priv) + ctrl);
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
}
/* Clear the scratchpad memory */
- val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+ memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, cd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
priv->config.cd_size,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.cd_offset),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- writel(val,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(5, 0),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
}
return 0;
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, rd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
priv->config.rd_size,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
writel(val,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(7, 0),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* enable ring interrupt */
- val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
val |= EIP197_RDR_IRQ(i);
- writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
}
return 0;
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
int i, ret;
/* Determine endianness and configure byte swap */
- version = readl(priv->base + EIP197_HIA_VERSION);
- val = readl(priv->base + EIP197_HIA_MST_CTRL);
+ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
val |= EIP197_MST_CTRL_BYTE_SWAP;
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
- writel(val, priv->base + EIP197_HIA_MST_CTRL);
-
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
- priv->base + EIP197_MST_CTRL);
+ EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
/* Interrupts reset */
/* Disable all global interrupts */
- writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+ writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
/* Clear any pending interrupt */
- writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
/* Data Fetch Engine configuration */
/* Reset all DFE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* DMA transfer size to use */
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, priv->base + EIP197_HIA_DFE_CFG);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
/* Leave the DFE threads reset state */
- writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- priv->base + EIP197_PE_IN_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- priv->base + EIP197_PE_IN_TBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* Data Store Engine configuration */
/* Reset all DSE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Wait for all DSE threads to complete */
- while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
GENMASK(15, 12)) != GENMASK(15, 12))
;
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, priv->base + EIP197_HIA_DSE_CFG);
+ /* FIXME: instability issues can occur on EIP97, but disabling this
+ * impacts performance.
+ */
+ if (priv->version == EIP197)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
/* Leave the DSE threads reset state */
- writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- priv->base + EIP197_PE_OUT_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
/* Processing Engine configuration */
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
val |= EIP197_ALG_SHA2;
- writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Clear interrupts for this ring */
writel(GENMASK(31, 0),
- priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+ EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Result Descriptor Ring prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Enable command descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Enable result descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Clear any HIA interrupt */
- writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- eip197_trc_cache_init(priv);
+ if (priv->version == EIP197) {
+ eip197_trc_cache_init(priv);
- ret = eip197_load_firmwares(priv);
- if (ret)
- return ret;
+ ret = eip197_load_firmwares(priv);
+ if (ret)
+ return ret;
+ }
safexcel_hw_setup_cdesc_rings(priv);
safexcel_hw_setup_rdesc_rings(priv);
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
return 0;
}
+/* Called with ring's lock taken */
+static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring, int reqs)
+{
+ int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+ if (!coal)
+ return 0;
+
+ /* Configure when we want an interrupt */
+ writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+ EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+
+ return coal;
+}
+
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
- priv->ring[ring].need_dequeue = false;
+ /* If a request wasn't properly dequeued because of a lack of resources,
+ * process it first.
+ */
+ req = priv->ring[ring].req;
+ backlog = priv->ring[ring].backlog;
+ if (req)
+ goto handle_req;
- do {
+ while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!req)
+ if (!req) {
+ priv->ring[ring].req = NULL;
+ priv->ring[ring].backlog = NULL;
goto finalize;
+ }
+handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request) {
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, req);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
-
- priv->ring[ring].need_dequeue = true;
- goto finalize;
- }
+ if (!request)
+ goto request_failed;
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
- req->complete(req, ret);
- priv->ring[ring].need_dequeue = true;
- goto finalize;
+ goto request_failed;
}
if (backlog)
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
cdesc += commands;
rdesc += results;
- } while (nreq++ < EIP197_MAX_BATCH_SZ);
+ nreq++;
+ }
+
+request_failed:
+ /* Not enough resources to handle all the requests. Bail out and save
+ * the request and the backlog for the next dequeue call (per-ring).
+ */
+ priv->ring[ring].req = req;
+ priv->ring[ring].backlog = backlog;
finalize:
- if (nreq == EIP197_MAX_BATCH_SZ)
- priv->ring[ring].need_dequeue = true;
- else if (!nreq)
+ if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].lock);
+ spin_lock_bh(&priv->ring[ring].egress_lock);
- /* Configure when we want an interrupt */
- writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
- EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+ if (!priv->ring[ring].busy) {
+ nreq -= safexcel_try_push_requests(priv, ring, nreq);
+ if (nreq)
+ priv->ring[ring].busy = true;
+ }
+
+ priv->ring[ring].requests_left += nreq;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
writel((cdesc * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
- spin_unlock_bh(&priv->ring[ring].lock);
+ EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request)
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc = 0;
+ int ret, i, nreq, ndesc, tot_descs, done;
bool should_complete;
- nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
- nreq >>= 24;
- nreq &= GENMASK(6, 0);
+handle_results:
+ tot_descs = 0;
+
+ nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+ nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
if (!nreq)
- return;
+ goto requests_left;
for (i = 0; i < nreq; i++) {
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -609,13 +648,9 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
if (ndesc < 0) {
kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
- return;
+ goto acknowledge;
}
- writel(EIP197_xDR_PROC_xD_PKT(1) |
- EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-
if (should_complete) {
local_bh_disable();
sreq->req->complete(sreq->req, ret);
@@ -623,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
}
kfree(sreq);
+ tot_descs += ndesc;
}
+
+acknowledge:
+ if (i) {
+ writel(EIP197_xDR_PROC_xD_PKT(i) |
+ EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ }
+
+ /* If the number of requests overflowed the counter, try to process more
+ * requests.
+ */
+ if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+ goto handle_results;
+
+requests_left:
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+
+ done = safexcel_try_push_requests(priv, ring,
+ priv->ring[ring].requests_left);
+
+ priv->ring[ring].requests_left -= done;
+ if (!done && !priv->ring[ring].requests_left)
+ priv->ring[ring].busy = false;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
}
-static void safexcel_handle_result_work(struct work_struct *work)
+static void safexcel_dequeue_work(struct work_struct *work)
{
struct safexcel_work_data *data =
container_of(work, struct safexcel_work_data, work);
- struct safexcel_crypto_priv *priv = data->priv;
-
- safexcel_handle_result_descriptor(priv, data->ring);
- if (priv->ring[data->ring].need_dequeue)
- safexcel_dequeue(data->priv, data->ring);
+ safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
@@ -647,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
- int ring = irq_data->ring;
+ int ring = irq_data->ring, rc = IRQ_NONE;
u32 status, stat;
- status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+ status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
if (!status)
- return IRQ_NONE;
+ return rc;
/* RDR interrupts */
if (status & EIP197_RDR_IRQ(ring)) {
- stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
if (unlikely(stat & EIP197_xDR_ERR)) {
/*
@@ -666,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
*/
dev_err(priv->dev, "RDR: fatal error.");
} else if (likely(stat & EIP197_xDR_THRESH)) {
- queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+ rc = IRQ_WAKE_THREAD;
}
/* ACK the interrupts */
writel(stat & 0xff,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
}
/* ACK the interrupts */
- writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+ writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+ return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+ struct safexcel_ring_irq_data *irq_data = data;
+ struct safexcel_crypto_priv *priv = irq_data->priv;
+ int ring = irq_data->ring;
+
+ safexcel_handle_result_descriptor(priv, ring);
+
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
irq_handler_t handler,
+ irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
int ret, irq = platform_get_irq_byname(pdev, name);
@@ -691,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
return irq;
}
- ret = devm_request_irq(&pdev->dev, irq, handler, 0,
- dev_name(&pdev->dev), ring_irq_priv);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+ threaded_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), ring_irq_priv);
if (ret) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
return ret;
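The conversion above from devm_request_irq() to devm_request_threaded_irq() splits interrupt handling in two: a hard handler that only checks and acknowledges the source, returning IRQ_WAKE_THREAD when there is work, and a thread that processes result descriptors before kicking the dequeue workqueue. A reduced sketch of the pattern (handler bodies are placeholders):

    static irqreturn_t ring_irq(int irq, void *data)
    {
        if (!irq_is_ours(data))      /* hypothetical status check */
            return IRQ_NONE;

        ack_irq(data);               /* hypothetical ack */
        return IRQ_WAKE_THREAD;      /* run ring_irq_thread() next */
    }

    static irqreturn_t ring_irq_thread(int irq, void *data)
    {
        handle_results(data);        /* hypothetical heavy lifting */
        return IRQ_HANDLED;
    }

    /* IRQF_ONESHOT keeps the line masked until the thread finishes. */
    ret = devm_request_threaded_irq(dev, irq, ring_irq, ring_irq_thread,
                                    IRQF_ONESHOT, dev_name(dev), data);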
@@ -755,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
u32 val, mask;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
@@ -769,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+ struct safexcel_register_offsets *offsets = &priv->offsets;
+
+ if (priv->version == EIP197) {
+ offsets->hia_aic = EIP197_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP197_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP197_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP197_PE_BASE;
+ } else {
+ offsets->hia_aic = EIP97_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP97_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP97_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP97_PE_BASE;
+ }
+}
+
static int safexcel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -781,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
+ priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+ safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
@@ -839,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+ safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
ret = irq;
@@ -847,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
- INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+ INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
@@ -856,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev)
goto err_clk;
}
+ priv->ring[i].requests_left = 0;
+ priv->ring[i].busy = false;
+
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
@@ -903,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev)
}
static const struct of_device_id safexcel_of_match_table[] = {
- { .compatible = "inside-secure,safexcel-eip197" },
+ {
+ .compatible = "inside-secure,safexcel-eip97",
+ .data = (void *)EIP97,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197",
+ .data = (void *)EIP197,
+ },
{},
};
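Most of the safexcel.c churn replaces absolute priv->base + REG accesses with macros that add a per-variant block offset, so one code path drives both the EIP97 and EIP197 register layouts. Reduced to two blocks, the pattern looks like this (a sketch; the offset values are taken from the header below, the struct and macro names are simplified):

    struct reg_offsets {
        u32 hia_aic;
        u32 pe;
    };

    #define HIA_AIC(priv)   ((priv)->base + (priv)->offsets.hia_aic)
    #define PE(priv)        ((priv)->base + (priv)->offsets.pe)

    static void init_register_offsets(struct safexcel_crypto_priv *priv)
    {
        if (priv->version == EIP197) {
            priv->offsets.hia_aic = 0x90000;
            priv->offsets.pe = 0xa0000;
        } else {                     /* EIP97 */
            priv->offsets.hia_aic = 0x0;
            priv->offsets.pe = 0x10000;
        }
    }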
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 304c5838c11a..4e219c21608b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -19,64 +19,103 @@
#define EIP197_HIA_VERSION_BE 0x35ca
/* Static configuration */
-#define EIP197_DEFAULT_RING_SIZE 64
+#define EIP197_DEFAULT_RING_SIZE 400
#define EIP197_MAX_TOKENS 5
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_COUNT 1
-#define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE
+#define EIP197_MAX_BATCH_SZ 64
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE 0x90000
+#define EIP197_HIA_AIC_G_BASE 0x90000
+#define EIP197_HIA_AIC_R_BASE 0x90800
+#define EIP197_HIA_AIC_xDR_BASE 0x80000
+#define EIP197_HIA_DFE_BASE 0x8c000
+#define EIP197_HIA_DFE_THR_BASE 0x8c040
+#define EIP197_HIA_DSE_BASE 0x8d000
+#define EIP197_HIA_DSE_THR_BASE 0x8d040
+#define EIP197_HIA_GEN_CFG_BASE 0xf0000
+#define EIP197_PE_BASE 0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE 0x0
+#define EIP97_HIA_AIC_G_BASE 0x0
+#define EIP97_HIA_AIC_R_BASE 0x0
+#define EIP97_HIA_AIC_xDR_BASE 0x0
+#define EIP97_HIA_DFE_BASE 0xf000
+#define EIP97_HIA_DFE_THR_BASE 0xf200
+#define EIP97_HIA_DSE_BASE 0xf400
+#define EIP97_HIA_DSE_THR_BASE 0xf600
+#define EIP97_HIA_GEN_CFG_BASE 0x10000
+#define EIP97_PE_BASE 0x10000
+
/* CDR/RDR register offsets */
-#define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000)
-#define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r))
-#define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800)
-#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0
-#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4
-#define EIP197_HIA_xDR_RING_SIZE 0x18
-#define EIP197_HIA_xDR_DESC_SIZE 0x1c
-#define EIP197_HIA_xDR_CFG 0x20
-#define EIP197_HIA_xDR_DMA_CFG 0x24
-#define EIP197_HIA_xDR_THRESH 0x28
-#define EIP197_HIA_xDR_PREP_COUNT 0x2c
-#define EIP197_HIA_xDR_PROC_COUNT 0x30
-#define EIP197_HIA_xDR_PREP_PNTR 0x34
-#define EIP197_HIA_xDR_PROC_PNTR 0x38
-#define EIP197_HIA_xDR_STAT 0x3c
+#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004
+#define EIP197_HIA_xDR_RING_SIZE 0x0018
+#define EIP197_HIA_xDR_DESC_SIZE 0x001c
+#define EIP197_HIA_xDR_CFG 0x0020
+#define EIP197_HIA_xDR_DMA_CFG 0x0024
+#define EIP197_HIA_xDR_THRESH 0x0028
+#define EIP197_HIA_xDR_PREP_COUNT 0x002c
+#define EIP197_HIA_xDR_PROC_COUNT 0x0030
+#define EIP197_HIA_xDR_PREP_PNTR 0x0034
+#define EIP197_HIA_xDR_PROC_PNTR 0x0038
+#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x8c000
-#define EIP197_HIA_DFE_THR_CTRL 0x8c040
-#define EIP197_HIA_DFE_THR_STAT 0x8c044
-#define EIP197_HIA_DSE_CFG 0x8d000
-#define EIP197_HIA_DSE_THR_CTRL 0x8d040
-#define EIP197_HIA_DSE_THR_STAT 0x8d044
-#define EIP197_HIA_RA_PE_CTRL 0x90010
-#define EIP197_HIA_RA_PE_STAT 0x90014
+#define EIP197_HIA_DFE_CFG 0x0000
+#define EIP197_HIA_DFE_THR_CTRL 0x0000
+#define EIP197_HIA_DFE_THR_STAT 0x0004
+#define EIP197_HIA_DSE_CFG 0x0000
+#define EIP197_HIA_DSE_THR_CTRL 0x0000
+#define EIP197_HIA_DSE_THR_STAT 0x0004
+#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
-#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808
-#define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810
-#define EIP197_HIA_AIC_G_ACK 0x9f810
-#define EIP197_HIA_MST_CTRL 0x9fff4
-#define EIP197_HIA_OPTIONS 0x9fff8
-#define EIP197_HIA_VERSION 0x9fffc
-#define EIP197_PE_IN_DBUF_THRES 0xa0000
-#define EIP197_PE_IN_TBUF_THRES 0xa0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0xa0800
-#define EIP197_PE_ICE_PUE_CTRL 0xa0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04
-#define EIP197_PE_ICE_FPP_CTRL 0xa0d80
-#define EIP197_PE_ICE_RAM_CTRL 0xa0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0xa1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c
-#define EIP197_PE_OUT_DBUF_THRES 0xa1c00
-#define EIP197_PE_OUT_TBUF_THRES 0xa1d00
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
+#define EIP197_HIA_AIC_G_ACK 0xf810
+#define EIP197_HIA_MST_CTRL 0xfff4
+#define EIP197_HIA_OPTIONS 0xfff8
+#define EIP197_HIA_VERSION 0xfffc
+#define EIP197_PE_IN_DBUF_THRES 0x0000
+#define EIP197_PE_IN_TBUF_THRES 0x0100
+#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
+#define EIP197_PE_ICE_PUE_CTRL 0x0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
+#define EIP197_PE_ICE_FPP_CTRL 0x0d80
+#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
+#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
+#define EIP197_PE_OUT_DBUF_THRES 0x1c00
+#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_MST_CTRL 0xfff4
+
+/* EIP197-specific registers, no indirection */
#define EIP197_CLASSIFICATION_RAMS 0xe0000
#define EIP197_TRC_CTRL 0xf0800
#define EIP197_TRC_LASTRES 0xf0804
@@ -90,7 +129,6 @@
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
#define EIP197_CS_RAM_CTRL 0xf7ff0
-#define EIP197_MST_CTRL 0xffff4
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
@@ -117,6 +155,8 @@
#define EIP197_xDR_PREP_CLR_COUNT BIT(31)
/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
+#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -463,12 +503,33 @@ struct safexcel_work_data {
int ring;
};
+enum safexcel_eip_version {
+ EIP97,
+ EIP197,
+};
+
+struct safexcel_register_offsets {
+ u32 hia_aic;
+ u32 hia_aic_g;
+ u32 hia_aic_r;
+ u32 hia_aic_xdr;
+ u32 hia_dfe;
+ u32 hia_dfe_thr;
+ u32 hia_dse;
+ u32 hia_dse_thr;
+ u32 hia_gen_cfg;
+ u32 pe;
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
struct safexcel_config config;
+ enum safexcel_eip_version version;
+ struct safexcel_register_offsets offsets;
+
/* context DMA pool */
struct dma_pool *context_pool;
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv {
/* queue */
struct crypto_queue queue;
spinlock_t queue_lock;
- bool need_dequeue;
+
+ /* Number of requests in the engine that need the threshold
+ * interrupt to be set up.
+ */
+ int requests_left;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for current requests when bailing out of the dequeueing
+ * function when not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
} ring[EIP197_MAX_RINGS];
};
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req,
int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request);
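With EIP197_xDR_PROC_xD_PKT_OFFSET and EIP197_xDR_PROC_xD_PKT_MASK defined above, the result handler can decode the processed-packet field of xDR_PROC_COUNT without magic numbers. The decode, matching those constants:

    /* Packets processed so far: bits [30:24] of the PROC_COUNT register. */
    static inline u32 xdr_processed_packets(u32 proc_count)
    {
        return (proc_count >> EIP197_xDR_PROC_xD_PKT_OFFSET) &
               EIP197_xDR_PROC_xD_PKT_MASK;
    }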
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index fcc0a606d748..63a8768ed2ae 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -27,7 +27,6 @@ struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
- enum safexcel_cipher_direction direction;
u32 mode;
__le32 key[8];
@@ -35,6 +34,7 @@ struct safexcel_cipher_ctx {
};
struct safexcel_cipher_req {
+ enum safexcel_cipher_direction direction;
bool needs_inv;
};
@@ -69,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -78,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
}
- for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+ if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
@@ -95,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+ struct crypto_async_request *async,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->priv;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int ctrl_size;
- if (ctx->direction == SAFEXCEL_ENCRYPT)
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
else
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
@@ -243,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
n_cdesc++;
if (n_cdesc == 1) {
- safexcel_context_control(ctx, cdesc);
+ safexcel_context_control(ctx, async, cdesc);
safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
}
@@ -353,8 +359,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
@@ -390,7 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+ ret = safexcel_invalidate_cache(async, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -406,9 +412,13 @@ static int safexcel_send(struct crypto_async_request *async,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
+ BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
if (sreq->needs_inv)
ret = safexcel_cipher_send_inv(async, ring, request,
commands, results);
@@ -443,8 +453,8 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -467,11 +477,11 @@ static int safexcel_aes(struct skcipher_request *req,
int ret, ring;
sreq->needs_inv = false;
- ctx->direction = dir;
+ sreq->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv) {
+ if (priv->version == EIP197 && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
@@ -490,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req,
ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -539,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
- ret = safexcel_cipher_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_cipher_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
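Moving the direction field from safexcel_cipher_ctx to safexcel_cipher_req makes it per-request state: two requests issued on the same tfm (one encrypting, one decrypting) can no longer clobber each other. A minimal sketch of the idea (names hypothetical):

    struct cipher_req_state {
        int direction;               /* per request, not per tfm */
        bool needs_inv;
    };

    static int queue_crypt(struct skcipher_request *req, int direction)
    {
        struct cipher_req_state *sreq = skcipher_request_ctx(req);

        sreq->direction = direction; /* safe against concurrent requests */
        return enqueue_to_ring(req); /* hypothetical */
    }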
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 0c5a5820b06e..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -14,7 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include "safexcel.h"
struct safexcel_ahash_ctx {
@@ -34,6 +33,8 @@ struct safexcel_ahash_req {
bool hmac;
bool needs_inv;
+ int nents;
+
u8 state_sz; /* expected state size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
@@ -152,8 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
- dma_unmap_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+ if (sreq->nents) {
+ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ sreq->nents = 0;
+ }
safexcel_free_context(priv, async, sreq->state_sz);
@@ -178,7 +181,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -186,17 +189,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
else
cache_len = queued - areq->nbytes;
- /*
- * If this is not the last request and the queued data does not fit
- * into full blocks, cache it for the next send() call.
- */
- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
- if (!req->last_req && extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra, areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ if (!req->last_req) {
+ /* If this is not the last request and the queued data does not
+ * fit into full blocks, cache it for the next send() call.
+ */
+ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+ if (!extra)
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
+ extra = queued - crypto_ahash_blocksize(ahash);
+
+ if (extra) {
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
+
+ queued -= extra;
+ len -= extra;
+
+ if (!queued) {
+ *commands = 0;
+ *results = 0;
+ return 0;
+ }
+ }
}
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -234,15 +251,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
- DMA_TO_DEVICE);
- if (!nents) {
+ req->nents = dma_map_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
+ if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
- for_each_sg(areq->src, sg, nents, i) {
+ for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
@@ -382,8 +399,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
@@ -398,6 +415,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int err;
+ BUG_ON(priv->version == EIP97 && req->needs_inv);
+
if (req->needs_inv) {
req->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async,
@@ -418,7 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -471,8 +490,8 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -485,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
return 0;
}
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block of data in the pipe.
+ */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
int queued, cache_len;
+ /* cache_len: everything accepted by the driver but not sent yet,
+ * tot sz handled by update() - last req sz - tot sz handled by send()
+ */
cache_len = req->len - areq->nbytes - req->processed;
+ /* queued: everything accepted by the driver which will be handled by
+ * the next send() calls.
+ * tot sz handled by update() - tot sz handled by send()
+ */
queued = req->len - req->processed;
/*
@@ -505,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
return areq->nbytes;
}
- /* We could'nt cache all the data */
+ /* We couldn't cache all the data */
return -E2BIG;
}
@@ -518,10 +547,17 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
req->needs_inv = false;
- if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
- ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
-
if (ctx->base.ctxr) {
+ if (priv->version == EIP197 &&
+ !ctx->base.needs_inv && req->processed &&
+ ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ /* We're still setting needs_inv here, even though it is
+ * cleared right away, because the needs_inv flag can be
+ * set in other functions and we want to keep the same
+ * logic.
+ */
+ ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
if (ctx->base.needs_inv) {
ctx->base.needs_inv = false;
req->needs_inv = true;
@@ -541,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -625,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->processed = req->processed;
memcpy(export->state, req->state, req->state_sz);
- memset(export->cache, 0, crypto_ahash_blocksize(ahash));
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
return 0;
@@ -707,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- ret = safexcel_ahash_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_ahash_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_sha1 = {
@@ -848,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS)
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
return ret;
wait_for_completion_interruptible(&result.completion);
@@ -913,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
@@ -920,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
- ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+ ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
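The hash-side rework above never lets send() consume the final block when more data may follow: a partial tail is cached, and when the queued length is an exact block multiple, everything except one block is held back so a later final() still has data to pad. The rule, isolated as a helper (a sketch mirroring the hunk, not the driver's code; assumes queued >= blocksize):

    static unsigned int bytes_to_cache(unsigned int queued,
                                       unsigned int blocksize, bool last_req)
    {
        unsigned int extra;

        if (last_req)
            return 0;                        /* flush everything */

        extra = queued & (blocksize - 1);    /* partial tail */
        if (!extra)
            extra = queued - blocksize;      /* exact multiple: send one block */

        return extra;
    }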
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8705b28eb02c..717a26607bdb 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_alloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_zalloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
- memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
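The ixp4xx change folds an allocation plus a manual memset() into dma_zalloc_coherent(), which returns already-zeroed coherent memory. The two forms below are equivalent:

    /* Before: allocate, then clear by hand. */
    buf = dma_alloc_coherent(dev, size, &phys, GFP_ATOMIC);
    if (buf)
        memset(buf, 0, size);

    /* After: a single call that returns zeroed memory. */
    buf = dma_zalloc_coherent(dev, size, &phys, GFP_ATOMIC);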
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 293832488cc9..aca2373fa1de 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -15,6 +15,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -24,6 +25,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -409,8 +411,11 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
- engine->sram_dma = phys_to_dma(cesa->dev,
- (phys_addr_t)res->start);
+ engine->sram_dma = dma_map_resource(cesa->dev, res->start,
+ cesa->sram_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(cesa->dev, engine->sram_dma))
+ return -ENOMEM;
return 0;
}
@@ -420,11 +425,12 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- if (!engine->pool)
- return;
-
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
- cesa->sram_size);
+ if (engine->pool)
+ gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ cesa->sram_size);
+ else
+ dma_unmap_resource(cesa->dev, engine->sram_dma,
+ cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
static int mv_cesa_probe(struct platform_device *pdev)
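The cesa change replaces phys_to_dma() — a helper drivers should not call directly, as the added XXX comment notes — with dma_map_resource(), which goes through the DMA API (honoring any IOMMU or bus offset) and can fail, hence the new error check. A sketch of the map/unmap pairing (wrapper names hypothetical):

    static int sram_map(struct device *dev, phys_addr_t start, size_t size,
                        dma_addr_t *dma)
    {
        *dma = dma_map_resource(dev, start, size, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, *dma))
            return -ENOMEM;
        return 0;
    }

    static void sram_unmap(struct device *dev, dma_addr_t dma, size_t size)
    {
        dma_unmap_resource(dev, dma, size, DMA_BIDIRECTIONAL, 0);
    }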
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index f2246a5abcf6..1e87637c412d 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -743,8 +743,8 @@ static int nx842_open_percpu_txwins(void)
}
if (!per_cpu(cpu_txwin, i)) {
- /* shoudn't happen, Each chip will have NX engine */
- pr_err("NX engine is not availavle for CPU %d\n", i);
+ /* shouldn't happen, each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
return -EINVAL;
}
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 5a6dc53b2b9d..4ef52c9d72fc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1618,7 +1618,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table);
static int spacc_probe(struct platform_device *pdev)
{
- int i, err, ret = -EINVAL;
+ int i, err, ret;
struct resource *mem, *irq;
struct device_node *np = pdev->dev.of_node;
struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
@@ -1679,22 +1679,18 @@ static int spacc_probe(struct platform_device *pdev)
engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
return PTR_ERR(engine->clk);
}
if (clk_prepare_enable(engine->clk)) {
dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- clk_put(engine->clk);
- return -EIO;
+ ret = -EIO;
+ goto err_clk_put;
}
- err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (err) {
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
- return err;
- }
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
/*
@@ -1725,6 +1721,7 @@ static int spacc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, engine);
+ ret = -EINVAL;
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
@@ -1759,6 +1756,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->aeads[i].alg.base.cra_name);
}
+ if (!ret)
+ return 0;
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+err_clk_disable:
+ clk_disable_unprepare(engine->clk);
+err_clk_put:
+ clk_put(engine->clk);
+
return ret;
}
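The picoxcell rework replaces duplicated inline cleanup with the usual goto ladder and keeps ret = -EINVAL until at least one algorithm registers, so probe only succeeds when the device exported something usable. The control flow, reduced to a sketch (struct example_engine and its fields are simplified stand-ins):

    static int example_probe(struct example_engine *engine)
    {
        int i, err, ret = -EINVAL;   /* fails unless something registers */

        err = clk_prepare_enable(engine->clk);
        if (err)
            return err;

        for (i = 0; i < engine->num_algs; ++i) {
            err = crypto_register_alg(&engine->algs[i]);
            if (!err)
                ret = 0;             /* at least one alg is usable */
        }

        if (!ret)
            return 0;

        /* Nothing registered: unwind in reverse order. */
        clk_disable_unprepare(engine->clk);
        return ret;
    }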
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8c4fd255a601..ff149e176f64 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned int csr,
- unsigned int *value)
+ unsigned char ae, unsigned int csr)
{
unsigned int iterations = CSR_RETRY_TIMES;
+ int value;
do {
- *value = GET_AE_CSR(handle, ae, csr);
+ value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
- return 0;
+ return value;
} while (iterations--);
pr_err("QAT: Read CSR timeout\n");
- return -EFAULT;
+ return 0;
}
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+ *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
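The qat_hal refactor turns qat_hal_rd_ae_csr() from an out-parameter function into one that returns the value directly, which is what lets every call site collapse from two statements into one. Note the semantic trade-off visible in the first hunk: the old code returned -EFAULT on a CSR read timeout, while the new code logs the error and returns 0, so callers can no longer tell a timeout apart from a register that genuinely reads as zero. Before and after at a call site:

    /* Before: value returned through a pointer, status as return code. */
    unsigned int csr;
    qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);

    /* After: the value is the return; a timed-out read comes back as 0. */
    csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);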
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
elapsed_cycles = cur_cnt - base_cnt;
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
}
/* Sets the acceleration engine context mode to either four or eight */
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
-static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- unsigned int ae_csr, unsigned int *csr_val)
+ unsigned int ae_csr)
{
- unsigned int cur_ctx;
+ unsigned int cur_ctx, csr_val;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+ return csr_val;
}
static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
@@ -358,7 +360,7 @@ static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -374,7 +376,7 @@ static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -392,13 +394,11 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
int times = MAX_RETRY_TIMES;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
do {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
} while (times-- && (cur_cnt == base_cnt));
@@ -416,8 +416,8 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
{
unsigned int enable = 0, active = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+ enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
@@ -540,7 +540,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK &
(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
@@ -583,7 +583,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_addr;
unsigned int i;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -604,7 +604,7 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK;
ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
ctx |= (ctx_mask << CE_ENABLE_BITPOS);
@@ -636,10 +636,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr_val &= IGNORE_W1C_MASK;
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
@@ -648,7 +648,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, ctx_mask,
@@ -760,7 +760,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
unsigned int csr_val = 0;
- qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
}
@@ -826,16 +826,16 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+ misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
misc_control & 0xfffffffb);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
for (i = 0; i < words_num; i++) {
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
uaddr++;
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+ uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+ uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
uword[i] = uwrd_hi;
uword[i] = (uword[i] << 0x20) | uwrd_lo;
}
@@ -849,7 +849,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
{
unsigned int i, ustore_addr;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -890,26 +890,27 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
/* save current context */
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
- &ind_lm_addr_byte0);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
- &ind_lm_addr_byte1);
+ ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+ ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+ ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX);
+ ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX);
if (inst_num <= MAX_EXEC_INST)
qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
- qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
- qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
- &ind_cnt_sig);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
- qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+ savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ FUTURE_COUNT_SIGNAL_INDIRECT);
+ ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_SIG_EVENTS_INDIRECT);
+ act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
/* execute micro codes */
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
@@ -927,8 +928,8 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
if (endpc) {
unsigned int ctx_status;
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
- &ctx_status);
+ ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_STS_INDIRECT);
*endpc = ctx_status & handle->hal_handle->upc_mask;
}
/* restore to saved context */
@@ -938,7 +939,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & savpc);
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
@@ -986,16 +987,16 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
ctx & ACS_ACNO);
qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr = UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
insts = qat_hal_set_uword_ecc(insts);
@@ -1011,7 +1012,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
* the instruction should have been executed
* prior to clearing the ECS in putUwords
*/
- qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+ *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
if (ctx != (savctx & ACS_ACNO))
@@ -1188,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned short mask;
unsigned short dr_offset = 0x10;
- status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1238,7 +1239,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
const unsigned short gprnum = 0, dly = num_inst * 0x5;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1282,7 +1283,7 @@ static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
unsigned int ctx_enables;
int stat = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
@@ -1299,7 +1300,7 @@ static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
{
unsigned int ctx_enables;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode */
*relreg = absreg_num & 0x1F;
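
The qat_hal.c hunks above are one mechanical conversion: qat_hal_rd_ae_csr() and qat_hal_rd_indr_csr() used to return a status code and hand the CSR contents back through a pointer; they now return the contents directly, and the timeout path degrades to a logged error that yields 0. A minimal sketch of the pattern, with hypothetical demo_* names standing in for the real accessors:

#include <linux/io.h>

/* Sketch only -- demo_* names are illustrative, not part of the driver. */

/* before: value through an out-parameter, success as a return code */
static int demo_rd_csr_old(void __iomem *base, unsigned int csr,
			   unsigned int *value)
{
	*value = readl(base + csr);
	return 0;
}

/* after: the value is the return code; call sites read naturally */
static unsigned int demo_rd_csr_new(void __iomem *base, unsigned int csr)
{
	return readl(base + csr);
}

The cost is that a timed-out read is no longer distinguishable from a CSR that legitimately reads as 0; the series accepts that in exchange for far simpler call sites.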
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 142c6020cec7..188f44b7eb27 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,17 +1,13 @@
-/*
- * Cryptographic API.
- *
- * Support for Samsung S5PV210 and Exynos HW acceleration.
- *
- * Copyright (C) 2011 NetUP Inc. All rights reserved.
- * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Hash part based on omap-sham.c driver.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Cryptographic API.
+//
+// Support for Samsung S5PV210 and Exynos HW acceleration.
+//
+// Copyright (C) 2011 NetUP Inc. All rights reserved.
+// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
+//
+// Hash part based on omap-sham.c driver.
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -1461,7 +1457,7 @@ static void s5p_hash_tasklet_cb(unsigned long data)
&dd->hash_flags)) {
/* hash or semi-hash ready */
clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
- goto finish;
+ goto finish;
}
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 602332e02729..63aa78c0b12b 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,4 +1,4 @@
-config CRC_DEV_STM32
+config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
@@ -6,7 +6,7 @@ config CRC_DEV_STM32
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
-config HASH_DEV_STM32
+config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32
depends on HAS_DMA
@@ -18,3 +18,12 @@ config HASH_DEV_STM32
help
This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
+
+config CRYPTO_DEV_STM32_CRYP
+ tristate "Support for STM32 cryp accelerators"
+ depends on ARCH_STM32
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73cd56cad0cc..53d1bb94b221 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
-obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
new file mode 100644
index 000000000000..4a06a7a665ee
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+
+#define DRIVER_NAME "stm32-cryp"
+
+/* Bit [0] encrypt / decrypt */
+#define FLG_ENCRYPT BIT(0)
+/* Bit [8..1] algo & operation mode */
+#define FLG_AES BIT(1)
+#define FLG_DES BIT(2)
+#define FLG_TDES BIT(3)
+#define FLG_ECB BIT(4)
+#define FLG_CBC BIT(5)
+#define FLG_CTR BIT(6)
+/* Mode mask = bits [15..0] */
+#define FLG_MODE_MASK GENMASK(15, 0)
+
+/* Registers */
+#define CRYP_CR 0x00000000
+#define CRYP_SR 0x00000004
+#define CRYP_DIN 0x00000008
+#define CRYP_DOUT 0x0000000C
+#define CRYP_DMACR 0x00000010
+#define CRYP_IMSCR 0x00000014
+#define CRYP_RISR 0x00000018
+#define CRYP_MISR 0x0000001C
+#define CRYP_K0LR 0x00000020
+#define CRYP_K0RR 0x00000024
+#define CRYP_K1LR 0x00000028
+#define CRYP_K1RR 0x0000002C
+#define CRYP_K2LR 0x00000030
+#define CRYP_K2RR 0x00000034
+#define CRYP_K3LR 0x00000038
+#define CRYP_K3RR 0x0000003C
+#define CRYP_IV0LR 0x00000040
+#define CRYP_IV0RR 0x00000044
+#define CRYP_IV1LR 0x00000048
+#define CRYP_IV1RR 0x0000004C
+
+/* Registers values */
+#define CR_DEC_NOT_ENC 0x00000004
+#define CR_TDES_ECB 0x00000000
+#define CR_TDES_CBC 0x00000008
+#define CR_DES_ECB 0x00000010
+#define CR_DES_CBC 0x00000018
+#define CR_AES_ECB 0x00000020
+#define CR_AES_CBC 0x00000028
+#define CR_AES_CTR 0x00000030
+#define CR_AES_KP 0x00000038
+#define CR_AES_UNKNOWN 0xFFFFFFFF
+#define CR_ALGO_MASK 0x00080038
+#define CR_DATA32 0x00000000
+#define CR_DATA16 0x00000040
+#define CR_DATA8 0x00000080
+#define CR_DATA1 0x000000C0
+#define CR_KEY128 0x00000000
+#define CR_KEY192 0x00000100
+#define CR_KEY256 0x00000200
+#define CR_FFLUSH 0x00004000
+#define CR_CRYPEN 0x00008000
+
+#define SR_BUSY 0x00000010
+#define SR_OFNE 0x00000004
+
+#define IMSCR_IN BIT(0)
+#define IMSCR_OUT BIT(1)
+
+#define MISR_IN BIT(0)
+#define MISR_OUT BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+
+struct stm32_cryp_ctx {
+ struct stm32_cryp *cryp;
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct stm32_cryp_reqctx {
+ unsigned long mode;
+};
+
+struct stm32_cryp {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long flags;
+ u32 irq_status;
+ struct stm32_cryp_ctx *ctx;
+
+ struct crypto_engine *engine;
+
+ struct mutex lock; /* protects req */
+ struct ablkcipher_request *req;
+
+ size_t hw_blocksize;
+
+ size_t total_in;
+ size_t total_in_save;
+ size_t total_out;
+ size_t total_out_save;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct scatterlist *out_sg_save;
+
+ struct scatterlist in_sgl;
+ struct scatterlist out_sgl;
+ bool sgs_copied;
+
+ int in_sg_len;
+ int out_sg_len;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+
+ u32 last_ctr[4];
+};
+
+struct stm32_cryp_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_cryp_list cryp_list = {
+ .dev_list = LIST_HEAD_INIT(cryp_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock),
+};
+
+static inline bool is_aes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_AES;
+}
+
+static inline bool is_des(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_DES;
+}
+
+static inline bool is_tdes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_TDES;
+}
+
+static inline bool is_ecb(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ECB;
+}
+
+static inline bool is_cbc(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CBC;
+}
+
+static inline bool is_ctr(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CTR;
+}
+
+static inline bool is_encrypt(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static inline bool is_decrypt(struct stm32_cryp *cryp)
+{
+ return !is_encrypt(cryp);
+}
+
+static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst)
+{
+ return readl_relaxed(cryp->regs + ofst);
+}
+
+static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val)
+{
+ writel_relaxed(val, cryp->regs + ofst);
+}
+
+static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ !(status & SR_BUSY), 10, 100000);
+}
+
+static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
+{
+ struct stm32_cryp *tmp, *cryp = NULL;
+
+ spin_lock_bh(&cryp_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &cryp_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&cryp_list.lock);
+
+ return cryp;
+}
+
+static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
+ size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
+{
+ int ret;
+
+ ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
+ cryp->hw_blocksize);
+ if (ret)
+ return ret;
+
+ ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
+ cryp->hw_blocksize);
+
+ return ret;
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
+static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
+{
+ void *buf_in, *buf_out;
+ int pages, total_in, total_out;
+
+ if (!stm32_cryp_check_io_aligned(cryp)) {
+ cryp->sgs_copied = 0;
+ return 0;
+ }
+
+ total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
+ pages = total_in ? get_order(total_in) : 1;
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
+ pages = total_out ? get_order(total_out) : 1;
+ buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in || !buf_out) {
+ dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+ cryp->sgs_copied = 0;
+ return -EFAULT;
+ }
+
+ sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+ sg_init_one(&cryp->in_sgl, buf_in, total_in);
+ cryp->in_sg = &cryp->in_sgl;
+ cryp->in_sg_len = 1;
+
+ sg_init_one(&cryp->out_sgl, buf_out, total_out);
+ cryp->out_sg_save = cryp->out_sg;
+ cryp->out_sg = &cryp->out_sgl;
+ cryp->out_sg_len = 1;
+
+ cryp->sgs_copied = 1;
+
+ return 0;
+}
+
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+{
+ if (!iv)
+ return;
+
+ stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+
+ if (is_aes(cryp)) {
+ stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ }
+}
+
+static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
+{
+ unsigned int i;
+ int r_id;
+
+ if (is_des(c)) {
+ stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ } else {
+ r_id = CRYP_K3RR;
+ for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+ stm32_cryp_write(c, r_id,
+ cpu_to_be32(c->ctx->key[i - 1]));
+ }
+}
+
+static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
+{
+ if (is_aes(cryp) && is_ecb(cryp))
+ return CR_AES_ECB;
+
+ if (is_aes(cryp) && is_cbc(cryp))
+ return CR_AES_CBC;
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ return CR_AES_CTR;
+
+ if (is_des(cryp) && is_ecb(cryp))
+ return CR_DES_ECB;
+
+ if (is_des(cryp) && is_cbc(cryp))
+ return CR_DES_CBC;
+
+ if (is_tdes(cryp) && is_ecb(cryp))
+ return CR_TDES_ECB;
+
+ if (is_tdes(cryp) && is_cbc(cryp))
+ return CR_TDES_CBC;
+
+ dev_err(cryp->dev, "Unknown mode\n");
+ return CR_AES_UNKNOWN;
+}
+
+static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+{
+ int ret;
+ u32 cfg, hw_mode;
+
+ /* Disable interrupt */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ /* Set key */
+ stm32_cryp_hw_write_key(cryp);
+
+ /* Set configuration */
+ cfg = CR_DATA8 | CR_FFLUSH;
+
+ switch (cryp->ctx->keylen) {
+ case AES_KEYSIZE_128:
+ cfg |= CR_KEY128;
+ break;
+
+ case AES_KEYSIZE_192:
+ cfg |= CR_KEY192;
+ break;
+
+ default:
+ case AES_KEYSIZE_256:
+ cfg |= CR_KEY256;
+ break;
+ }
+
+ hw_mode = stm32_cryp_get_hw_mode(cryp);
+ if (hw_mode == CR_AES_UNKNOWN)
+ return -EINVAL;
+
+ /* AES ECB/CBC decrypt: run key preparation first */
+ if (is_decrypt(cryp) &&
+ ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_busy(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (key preparation)\n");
+ return ret;
+ }
+ }
+
+ cfg |= hw_mode;
+
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
+
+ /* Apply config and flush (valid when CRYPEN = 0) */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ switch (hw_mode) {
+ case CR_DES_CBC:
+ case CR_TDES_CBC:
+ case CR_AES_CBC:
+ case CR_AES_CTR:
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Enable now */
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return 0;
+}
+
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+{
+ int err = 0;
+
+ if (cryp->sgs_copied) {
+ void *buf_in, *buf_out;
+ int pages, len;
+
+ buf_in = sg_virt(&cryp->in_sgl);
+ buf_out = sg_virt(&cryp->out_sgl);
+
+ sg_copy_buf(buf_out, cryp->out_sg_save, 0,
+ cryp->total_out_save, 1);
+
+ len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_in, pages);
+
+ len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_out, pages);
+ }
+
+ crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
+ cryp->req = NULL;
+
+ memset(cryp->ctx->key, 0, cryp->ctx->keylen);
+
+ mutex_unlock(&cryp->lock);
+}
+
+static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
+{
+ /* Enable interrupt and let the IRQ handler do everything */
+ stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+
+ return 0;
+}
+
+static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ return 0;
+}
+
+static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != (3 * DES_KEY_SIZE))
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
+}
+
+static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
+}
+
+static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
+}
+
+static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
+}
+
+static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
+}
+
+static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
+}
+
+static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
+}
+
+static int stm32_cryp_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx;
+ struct stm32_cryp *cryp;
+ struct stm32_cryp_reqctx *rctx;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ mutex_lock(&cryp->lock);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode &= FLG_MODE_MASK;
+
+ ctx->cryp = cryp;
+
+ cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
+ cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
+ cryp->ctx = ctx;
+
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+
+ cryp->total_in_save = cryp->total_in;
+ cryp->total_out_save = cryp->total_out;
+
+ cryp->in_sg = req->src;
+ cryp->out_sg = req->dst;
+ cryp->out_sg_save = cryp->out_sg;
+
+ cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
+ if (cryp->in_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get in_sg_len\n");
+ ret = cryp->in_sg_len;
+ goto out;
+ }
+
+ cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
+ if (cryp->out_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get out_sg_len\n");
+ ret = cryp->out_sg_len;
+ goto out;
+ }
+
+ ret = stm32_cryp_copy_sgs(cryp);
+ if (ret)
+ goto out;
+
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+
+ ret = stm32_cryp_hw_init(cryp);
+out:
+ if (ret)
+ mutex_unlock(&cryp->lock);
+
+ return ret;
+}
+
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return stm32_cryp_prepare_req(engine, req);
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
+static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->out_walk, n);
+
+ if (unlikely(cryp->out_sg->length == _walked_out)) {
+ cryp->out_sg = sg_next(cryp->out_sg);
+ if (cryp->out_sg) {
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ return (sg_virt(cryp->out_sg) + _walked_out);
+ }
+ }
+
+ return (u32 *)((u8 *)dst + n);
+}
+
+static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->in_walk, n);
+
+ if (unlikely(cryp->in_sg->length == _walked_in)) {
+ cryp->in_sg = sg_next(cryp->in_sg);
+ if (cryp->in_sg) {
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ return (sg_virt(cryp->in_sg) + _walked_in);
+ }
+ }
+
+ return (u32 *)((u8 *)src + n);
+}
+
+static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
+{
+ u32 cr;
+
+ if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
+ cryp->last_ctr[3] = 0;
+ cryp->last_ctr[2]++;
+ if (!cryp->last_ctr[2]) {
+ cryp->last_ctr[1]++;
+ if (!cryp->last_ctr[1])
+ cryp->last_ctr[0]++;
+ }
+
+ cr = stm32_cryp_read(cryp, CRYP_CR);
+ stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+
+ stm32_cryp_write(cryp, CRYP_CR, cr);
+ }
+
+ cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
+ cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
+ cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
+ cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+}
+
+static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 d32, *dst;
+ u8 *d8;
+
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_out >= sizeof(u32))) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+ /* Empty fifo out (data from input padding) */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+ /* Read less than an u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+
+ return !cryp->total_out || !cryp->total_in;
+}
+
+static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_in >= sizeof(u32))) {
+ /* Write a full u32 */
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= sizeof(u32);
+ } else if (!cryp->total_in) {
+ /* Write padding data */
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ } else {
+ /* Write less than an u32 */
+ memset(d8, 0, sizeof(u32));
+ for (j = 0; j < cryp->total_in; j++) {
+ d8[j] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ cryp->total_in = 0;
+ }
+ }
+}
+
+static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
+{
+ if (unlikely(!cryp->total_in)) {
+ dev_warn(cryp->dev, "No more data to process\n");
+ return;
+ }
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ stm32_cryp_check_ctr_counter(cryp);
+
+ stm32_cryp_irq_write_block(cryp);
+}
+
+static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ if (cryp->irq_status & MISR_OUT)
+ /* Output FIFO IRQ: read data */
+ if (unlikely(stm32_cryp_irq_read_data(cryp))) {
+ /* All bytes processed, finish */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp);
+ return IRQ_HANDLED;
+ }
+
+ if (cryp->irq_status & MISR_IN) {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cryp_irq(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "stm32-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "stm32-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "stm32-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "stm32-ecb-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "stm32-cbc-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "stm32-ecb-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "stm32-cbc-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ }
+},
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "st,stm32f756-cryp", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static int stm32_cryp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_cryp *cryp;
+ struct resource *res;
+ struct reset_control *rst;
+ int irq, ret;
+
+ cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ cryp->dev = dev;
+
+ mutex_init(&cryp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cryp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cryp->regs))
+ return PTR_ERR(cryp->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
+ stm32_cryp_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), cryp);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ cryp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cryp->clk)) {
+ dev_err(dev, "Could not get clock\n");
+ return PTR_ERR(cryp->clk);
+ }
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ rst = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ platform_set_drvdata(pdev, cryp);
+
+ spin_lock(&cryp_list.lock);
+ list_add(&cryp->list, &cryp_list.dev_list);
+ spin_unlock(&cryp_list.lock);
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(dev, 1);
+ if (!cryp->engine) {
+ dev_err(dev, "Could not init crypto engine\n");
+ ret = -ENOMEM;
+ goto err_engine1;
+ }
+
+ cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
+ cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto err_engine2;
+ }
+
+ ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ if (ret) {
+ dev_err(dev, "Could not register algs\n");
+ goto err_algs;
+ }
+
+ dev_info(dev, "Initialized\n");
+
+ return 0;
+
+err_algs:
+err_engine2:
+ crypto_engine_exit(cryp->engine);
+err_engine1:
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return ret;
+}
+
+static int stm32_cryp_remove(struct platform_device *pdev)
+{
+ struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+
+ if (!cryp)
+ return -ENODEV;
+
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+
+ crypto_engine_exit(cryp->engine);
+
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_cryp_driver = {
+ .probe = stm32_cryp_probe,
+ .remove = stm32_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = stm32_dt_ids,
+ },
+};
+
+module_platform_driver(stm32_cryp_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
+MODULE_LICENSE("GPL");
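
With the driver above loaded, its algorithms are reachable by name through the kernel crypto API; whether stm32-cryp actually services a request depends on its cra_priority winning the selection. A minimal sketch of a caller using the generic skcipher interface (demo_cbc_aes_encrypt and the 128-bit key size are illustrative assumptions; for CBC, len must be a multiple of AES_BLOCK_SIZE):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int demo_cbc_aes_encrypt(u8 *buf, unsigned int len,
				const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* Blocks until the (possibly asynchronous) engine completes. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}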
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 090582baecfe..8f09b8430893 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
@@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32c",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
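
These two one-line additions exist because crc32 and crc32c are keyed hashes with a well-defined default key: CRYPTO_ALG_OPTIONAL_KEY tells the core that ->setkey() may be skipped, exempting the algorithm from the hardening that otherwise refuses to digest on an unkeyed tfm. A sketch of the shape such an algorithm takes (demo_* names illustrative):

#include <crypto/internal/hash.h>
#include <asm/unaligned.h>

#define DEMO_DEFAULT_SEED 0xffffffff

struct demo_ctx {
	u32 seed;
};

static int demo_cra_init(struct crypto_tfm *tfm)
{
	struct demo_ctx *mctx = crypto_tfm_ctx(tfm);

	mctx->seed = DEMO_DEFAULT_SEED;	/* used when no key is ever set */
	return 0;
}

static int demo_setkey(struct crypto_shash *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct demo_ctx *mctx = crypto_shash_ctx(tfm);

	if (keylen != sizeof(u32))
		return -EINVAL;

	mctx->seed = get_unaligned_le32(key);	/* optional override */
	return 0;
}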
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 78fb496ecb4e..fe2af6aa88fc 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
devfreq = devfreq_add_device(dev, profile, governor_name, data);
if (IS_ERR(devfreq)) {
devres_free(ptr);
- return ERR_PTR(-ENOMEM);
+ return devfreq;
}
*ptr = devfreq;
@@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
- } else if (df->governor->immutable || governor->immutable) {
+ } else if ((df->governor && df->governor->immutable) ||
+ governor->immutable) {
ret = -EINVAL;
goto out;
}
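
Two independent fixes above: the devm_ wrapper used to replace whatever error devfreq_add_device() produced with a blanket -ENOMEM, hiding the real cause (e.g. -EINVAL for a bad profile or governor name) from callers; and governor_store() could dereference df->governor before any governor was bound. The corrected devres shape, with hypothetical demo names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct demo;					/* opaque, illustrative */
extern struct demo *demo_add(struct device *dev);
extern void demo_remove(struct demo *d);

static void demo_devres_release(struct device *dev, void *res)
{
	demo_remove(*(struct demo **)res);
}

static struct demo *devm_demo_add(struct device *dev)
{
	struct demo **ptr, *d;

	ptr = devres_alloc(demo_devres_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);	/* our own failure: -ENOMEM fits */

	d = demo_add(dev);
	if (IS_ERR(d)) {
		devres_free(ptr);
		return d;			/* propagate the real reason */
	}

	*ptr = d;
	devres_add(dev, ptr);
	return d;
}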
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index bc1cb284111c..12b62d0aac27 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -157,13 +157,13 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
-static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
struct dma_fence *fence_excl;
- unsigned long events;
+ __poll_t events;
unsigned shared_count, seq;
dmabuf = file->private_data;
@@ -195,7 +195,7 @@ retry:
if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- unsigned long pevents = POLLIN;
+ __poll_t pevents = POLLIN;
if (shared_count == 0)
pevents |= POLLOUT;
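
The dma-buf and sync_file changes are mechanical conversions to __poll_t, the sparse-annotated bitwise type for poll masks: once the mask has its own type, the checker can flag code that mixes it with arbitrary integers or returns it through the wrong signature. The shape of a converted handler (demo names illustrative):

#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	__poll_t events = 0;

	poll_wait(file, &demo_wq, wait);

	if (demo_ready)
		events |= POLLIN | POLLRDNORM;

	return events;
}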
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 03830634e141..8e8c4a12a0bc 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -312,7 +312,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
+static __poll_t sync_file_poll(struct file *file, poll_table *wait)
{
struct sync_file *sync_file = file->private_data;
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b52b0d55247e..97483df1f82e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan)
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
- pl08x_desc_free(&plchan->at->vd);
+ vchan_terminate_vdesc(&plchan->at->vd);
plchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
@@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan)
return 0;
}
+static void pl08x_synchronize(struct dma_chan *chan)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ vchan_synchronize(&plchan->vc);
+}
+
static int pl08x_pause(struct dma_chan *chan)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.device_pause = pl08x_pause;
pl08x->memcpy.device_resume = pl08x_resume;
pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+ pl08x->memcpy.device_synchronize = pl08x_synchronize;
pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
@@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_pause = pl08x_pause;
pl08x->slave.device_resume = pl08x_resume;
pl08x->slave.device_terminate_all = pl08x_terminate_all;
+ pl08x->slave.device_synchronize = pl08x_synchronize;
pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.directions =
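
This terminate/synchronize pairing is the virt-dma contract, and the same conversion repeats in the bcm2835, dma-jz4780, edma, img-mdc, k3dma and omap-dma hunks below: device_terminate_all() may run in atomic context, so the in-flight descriptor is handed to vchan_terminate_vdesc() instead of being freed on the spot, and the new device_synchronize() hook gives the core a process-context point at which callbacks have finished and the descriptor can be reclaimed. Reduced to a skeleton (demo_* types are illustrative; assumes a driver under drivers/dma/):

#include "virt-dma.h"

struct demo_desc {
	struct virt_dma_desc vd;
};

struct demo_chan {
	struct virt_dma_chan vc;
	struct demo_desc *desc;		/* descriptor currently on the HW */
};

static inline struct demo_chan *to_demo_chan(struct dma_chan *chan)
{
	return container_of(chan, struct demo_chan, vc.chan);
}

static int demo_terminate_all(struct dma_chan *chan)
{
	struct demo_chan *c = to_demo_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->desc) {
		/* Defer the free: a completion tasklet may still hold a
		 * reference, so freeing here (the old way) risks a
		 * use-after-free.
		 */
		vchan_terminate_vdesc(&c->desc->vd);
		c->desc = NULL;
	}
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);
	return 0;
}

static void demo_synchronize(struct dma_chan *chan)
{
	/* Process context: waits for callbacks and reclaims the
	 * descriptor handed to vchan_terminate_vdesc() above.
	 */
	vchan_synchronize(&to_demo_chan(chan)->vc);
}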
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6204cc32d09c..847f84a41a69 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- bcm2835_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c->chan_base);
@@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void bcm2835_dma_synchronize(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
int irq, unsigned int irq_flags)
{
@@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
od->ddev.device_config = bcm2835_dma_slave_config;
od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+ od->ddev.device_synchronize = bcm2835_dma_synchronize;
od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index f7e965f63274..d9bee65a18a4 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -934,7 +934,7 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
ARRAY_SIZE(am335x_usb_queues_tx));
- if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx)))
+ if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
return false;
cchan->q_num = queues[cchan->port_num].submit;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7373b7a555ec..85820a2d69d4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
/* Clear the DMA status and stop the transfer. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
if (jzchan->desc) {
- jz4780_dma_desc_free(&jzchan->desc->vdesc);
+ vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
@@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void jz4780_dma_synchronize(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ vchan_synchronize(&jzchan->vchan);
+}
+
static int jz4780_dma_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
dd->device_config = jz4780_dma_config;
dd->device_terminate_all = jz4780_dma_terminate_all;
+ dd->device_synchronize = jz4780_dma_synchronize;
dd->device_tx_status = jz4780_dma_tx_status;
dd->device_issue_pending = jz4780_dma_issue_pending;
dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ec5f9d2bc820..80cc2be6483c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -355,7 +355,7 @@ static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
struct dmatest_thread *thread =
- container_of(arg, struct dmatest_thread, done_wait);
+ container_of(done, struct dmatest_thread, test_done);
if (!thread->done) {
done->done = true;
wake_up_all(done->wait);
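
The callback argument points at thread->test_done, but the old container_of() named done_wait as the member, so the computed thread pointer was offset by the distance between the two fields and the !thread->done test read unrelated memory. container_of() cannot catch this here because arg arrives as void *; the failure mode, reduced (demo_* names illustrative):

#include <linux/kernel.h>

struct demo_done {
	bool done;
};

struct demo_thread {
	long pad;			/* stands in for earlier members */
	struct demo_done test_done;
};

static void demo_callback(void *arg)
{
	/* 'arg' is void *, so container_of() cannot type-check it; the
	 * member named must be the one the caller actually passed, or
	 * the result is shifted by the offsetof() difference.
	 */
	struct demo_done *done = arg;
	struct demo_thread *t =
		container_of(done, struct demo_thread, test_done);

	t->test_done.done = true;
}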
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 9364a3ed345a..948df1ab5f1a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan)
/* Move the cyclic channel back to default queue */
if (!echan->tc && echan->edesc->cyclic)
edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
- /*
- * free the running request descriptor
- * since it is not in any of the vdesc lists
- */
- edma_desc_free(&echan->edesc->vdesc);
+
+ vchan_terminate_vdesc(&echan->edesc->vdesc);
echan->edesc = NULL;
}
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0391f930aecc..25cec9c243e1 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
static int mdc_terminate_all(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
- struct mdc_tx_desc *mdesc;
unsigned long flags;
LIST_HEAD(head);
@@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan)
mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
MDC_CONTROL_AND_STATUS);
- mdesc = mchan->desc;
- mchan->desc = NULL;
+ if (mchan->desc) {
+ vchan_terminate_vdesc(&mchan->desc->vd);
+ mchan->desc = NULL;
+ }
vchan_get_all_descriptors(&mchan->vc, &head);
mdc_get_new_events(mchan);
spin_unlock_irqrestore(&mchan->vc.lock, flags);
- if (mdesc)
- mdc_desc_free(&mdesc->vd);
vchan_dma_desc_free_list(&mchan->vc, &head);
return 0;
}
+static void mdc_synchronize(struct dma_chan *chan)
+{
+ struct mdc_chan *mchan = to_mdc_chan(chan);
+
+ vchan_synchronize(&mchan->vc);
+}
+
static int mdc_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+ mdma->dma_dev.device_synchronize = mdc_synchronize;
mdma->dma_dev.device_config = mdc_slave_config;
mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 2184881afe76..e7db24c67030 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1939,4 +1939,10 @@ module_platform_driver(sdma_driver);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
+#if IS_ENABLED(CONFIG_SOC_IMX6Q)
+MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
+#endif
+#if IS_ENABLED(CONFIG_SOC_IMX7D)
+MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
+#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 58d4ccd33672..8b5b23a8ace9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -597,7 +597,6 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
dump_desc_dbg(ioat_chan, desc);
@@ -715,7 +714,6 @@ static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
for (i = 1; i < active; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
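
The two smp_read_barrier_depends() calls can go because, since the v4.15 barrier rework, READ_ONCE() already implies the address-dependency barrier that only DEC Alpha needs; standalone calls next to plain loads are redundant, and code that genuinely needs dependency ordering expresses it through READ_ONCE() instead. Reduced shape (demo names illustrative):

#include <linux/compiler.h>

struct demo_desc {
	int cookie;
};

#define DEMO_RING 16
static struct demo_desc *demo_ring[DEMO_RING];

static int demo_peek(unsigned int idx)
{
	/* READ_ONCE() carries the Alpha-only address-dependency barrier,
	 * so no separate smp_read_barrier_depends() is needed before
	 * dereferencing the loaded pointer.
	 */
	struct demo_desc *d = READ_ONCE(demo_ring[idx % DEMO_RING]);

	return d ? d->cookie : -1;
}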
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 01d2a750a621..26b67455208f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
c->phy = NULL;
p->vchan = NULL;
if (p->ds_run) {
- k3_dma_free_desc(&p->ds_run->vd);
+ vchan_terminate_vdesc(&p->ds_run->vd);
p->ds_run = NULL;
}
p->ds_done = NULL;
@@ -730,6 +730,13 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void k3_dma_synchronize(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int k3_dma_transfer_pause(struct dma_chan *chan)
{
struct k3_dma_chan *c = to_k3_chan(chan);
@@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_pause = k3_dma_transfer_pause;
d->slave.device_resume = k3_dma_transfer_resume;
d->slave.device_terminate_all = k3_dma_terminate_all;
+ d->slave.device_synchronize = k3_dma_synchronize;
d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
/* init virtual channel */
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 5ba5714d0b7c..94d7bd7d2880 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -480,9 +480,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
mic_dma_intr_handler, mic_dma_thread_fn,
"mic dma_channel", ch, ch->ch_num);
- if (IS_ERR(ch->cookie))
- return PTR_ERR(ch->cookie);
- return 0;
+ return PTR_ERR_OR_ZERO(ch->cookie);
}
static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index f6dd849159d8..d21c19822feb 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- omap_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e3669850aef4..963cc5228d05 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -50,6 +50,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -104,6 +105,10 @@ static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+enum hidma_cap {
+ HIDMA_MSI_CAP = 1,
+ HIDMA_IDENTITY_CAP,
+};
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
@@ -736,25 +741,12 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
#endif
}
-static bool hidma_msi_capable(struct device *dev)
+static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- const char *of_compat;
- int ret = -EINVAL;
-
- if (!adev || acpi_disabled) {
- ret = device_property_read_string(dev, "compatible",
- &of_compat);
- if (ret)
- return false;
+ enum hidma_cap cap;
- ret = strcmp(of_compat, "qcom,hidma-1.1");
- } else {
-#ifdef CONFIG_ACPI
- ret = strcmp(acpi_device_hid(adev), "QCOM8062");
-#endif
- }
- return ret == 0;
+ cap = (enum hidma_cap) device_get_match_data(dev);
+ return cap ? ((cap & test_cap) > 0) : 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -834,8 +826,7 @@ static int hidma_probe(struct platform_device *pdev)
* Determine the MSI capability of the platform. Old HW doesn't
* support MSI.
*/
- msi = hidma_msi_capable(&pdev->dev);
-
+ msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -848,7 +839,10 @@ static int hidma_probe(struct platform_device *pdev)
if (!dmadev->nr_descriptors)
dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
- dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+ if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
+ dmadev->chidx = readl(dmadev->dev_trca + 0x40);
+ else
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
/* Set DMA mask to 64 bits. */
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -953,7 +947,8 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
- {"QCOM8062"},
+ {"QCOM8062", HIDMA_MSI_CAP},
+ {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
@@ -961,7 +956,9 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
- {.compatible = "qcom,hidma-1.1",},
+ {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
+ {.compatible = "qcom,hidma-1.2",
+ .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
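Both the ACPI and OF tables above now carry the capability flags as match data, so hidma_test_capability() can stay bus-agnostic: device_get_match_data() returns the .data pointer for an OF match and the driver_data field for an ACPI ID. A generic sketch of the idiom (foo_* names are hypothetical):

    /* Hypothetical driver using the same match-data idiom. */
    static const struct of_device_id foo_match[] = {
            { .compatible = "vendor,foo-v2", .data = (void *)FOO_CAP_MSI },
            { }     /* FOO_CAP_MSI: hypothetical capability flag */
    };

    static int foo_probe(struct platform_device *pdev)
    {
            /* cast via uintptr_t to avoid pointer-width warnings */
            unsigned long caps = (uintptr_t)device_get_match_data(&pdev->dev);

            if (caps & FOO_CAP_MSI)
                    foo_enable_msi(pdev);   /* hypothetical helper */
            return 0;
    }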
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 4999e266b2de..7c6e2ff212a2 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
*/
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
+ unsigned long irqflags;
+
if (cause & HIDMA_ERR_INT_MASK) {
dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause);
@@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
return;
}
+ spin_lock_irqsave(&lldev->lock, irqflags);
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ spin_unlock_irqrestore(&lldev->lock, irqflags);
+
/*
* Fine tuned for this HW...
*
@@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
* Try to consume as many EVREs as possible.
*/
hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
}
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 7335e2eb9b72..000c7019ca7d 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
@@ -356,67 +357,37 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
struct platform_device *pdev_parent = of_find_device_by_node(np);
struct platform_device_info pdevinfo;
- struct of_phandle_args out_irq;
struct device_node *child;
- struct resource *res = NULL;
- const __be32 *cell;
- int ret = 0, size, i, num;
- u64 addr, addr_size;
+ struct resource *res;
+ int ret = 0;
+
+ /* allocate a resource array */
+ res = kcalloc(3, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
for_each_available_child_of_node(np, child) {
- struct resource *res_iter;
struct platform_device *new_pdev;
- cell = of_get_property(child, "reg", &size);
- if (!cell) {
- ret = -EINVAL;
+ ret = of_address_to_resource(child, 0, &res[0]);
+ if (ret)
goto out;
- }
-
- size /= sizeof(*cell);
- num = size /
- (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
- /* allocate a resource array */
- res = kcalloc(num, sizeof(*res), GFP_KERNEL);
- if (!res) {
- ret = -ENOMEM;
+ ret = of_address_to_resource(child, 1, &res[1]);
+ if (ret)
goto out;
- }
-
- /* read each reg value */
- i = 0;
- res_iter = res;
- while (i < size) {
- addr = of_read_number(&cell[i],
- of_n_addr_cells(child));
- i += of_n_addr_cells(child);
-
- addr_size = of_read_number(&cell[i],
- of_n_size_cells(child));
- i += of_n_size_cells(child);
-
- res_iter->start = addr;
- res_iter->end = res_iter->start + addr_size - 1;
- res_iter->flags = IORESOURCE_MEM;
- res_iter++;
- }
- ret = of_irq_parse_one(child, 0, &out_irq);
- if (ret)
+ ret = of_irq_to_resource(child, 0, &res[2]);
+ if (ret <= 0)
goto out;
- res_iter->start = irq_create_of_mapping(&out_irq);
- res_iter->name = "hidma event irq";
- res_iter->flags = IORESOURCE_IRQ;
-
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.fwnode = &child->fwnode;
pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
pdevinfo.name = child->name;
pdevinfo.id = object_counter++;
pdevinfo.res = res;
- pdevinfo.num_res = num;
+ pdevinfo.num_res = 3;
pdevinfo.data = NULL;
pdevinfo.size_data = 0;
pdevinfo.dma_mask = DMA_BIT_MASK(64);
@@ -434,8 +405,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
*/
of_msi_configure(&new_pdev->dev, child);
of_node_put(child);
- kfree(res);
- res = NULL;
}
out:
kfree(res);
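The simplified parsing above leans on two different return conventions, which is why the checks differ: of_address_to_resource() returns 0 on success and a negative errno on failure, while of_irq_to_resource() returns the mapped IRQ number (> 0) on success and 0 or a negative value on failure. In sketch form:

    /* Return conventions relied on above (sketch). */
    if (of_address_to_resource(child, 0, &res[0]))  /* 0 == success */
            goto out;

    ret = of_irq_to_resource(child, 0, &res[2]);    /* > 0 == the IRQ */
    if (ret <= 0)
            goto out;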
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f04c4702d98b..cd92d696bcf9 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
/* Dequeue current job */
if (s3cchan->at) {
- s3c24xx_dma_desc_free(&s3cchan->at->vd);
+ vchan_terminate_vdesc(&s3cchan->at->vd);
s3cchan->at = NULL;
}
@@ -744,6 +744,13 @@ unlock:
return ret;
}
+static void s3c24xx_dma_synchronize(struct dma_chan *chan)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+
+ vchan_synchronize(&s3cchan->vc);
+}
+
static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
/* Initialize slave engine for SoC internal dedicated peripherals */
dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
s3cdma->slave.filter.map = pdata->slave_map;
s3cdma->slave.filter.mapcnt = pdata->slavecnt;
s3cdma->slave.filter.fn = s3c24xx_dma_filter;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b2c7db3e480..e3ff162c03fc 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -741,6 +742,41 @@ static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
/* -----------------------------------------------------------------------------
* Stop and reset
*/
+static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
+{
+ u32 chcr;
+ unsigned int i;
+
+ /*
+ * Ensure that the setting of the DE bit is actually 0 after
+ * clearing it.
+ */
+ for (i = 0; i < 1024; i++) {
+ chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+ udelay(1);
+ }
+
+ dev_err(chan->chan.device->dev, "CHCR DE check error\n");
+}
+
+static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
+{
+ u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+
+ /* set DE=0 and flush remaining data */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
+
+ /* make sure all remaining data was flushed */
+ rcar_dmac_chcr_de_barrier(chan);
+
+ /* restore the DE bit */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+}
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
@@ -749,6 +785,7 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+ rcar_dmac_chcr_de_barrier(chan);
}
static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
@@ -1309,8 +1346,11 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
residue += chunk->size;
}
+ if (desc->direction == DMA_DEV_TO_MEM)
+ rcar_dmac_sync_tcr(chan);
+
/* Add the residue for the current chunk. */
- residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
+ residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
return residue;
}
@@ -1481,6 +1521,8 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
if (chcr & RCAR_DMACHCR_TE)
mask |= RCAR_DMACHCR_DE;
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
+ if (mask & RCAR_DMACHCR_DE)
+ rcar_dmac_chcr_de_barrier(chan);
if (chcr & RCAR_DMACHCR_DSE)
ret |= rcar_dmac_isr_desc_stage_end(chan);
@@ -1615,22 +1657,6 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
-static int rcar_dmac_sleep_suspend(struct device *dev)
-{
- /*
- * TODO: Wait for the current transfer to complete and stop the device.
- */
- return 0;
-}
-
-static int rcar_dmac_sleep_resume(struct device *dev)
-{
- /* TODO: Resume transfers, if any. */
- return 0;
-}
-#endif
-
#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
@@ -1646,7 +1672,13 @@ static int rcar_dmac_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rcar_dmac_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+ /*
+ * TODO for system sleep/resume:
+ * - Wait for the current transfer to complete and stop the device,
+ * - Resume transfers, if any.
+ */
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
NULL)
};
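rcar_dmac_chcr_de_barrier() above is a bounded read-back poll: clearing DE only takes effect once the controller has flushed in-flight data, so the driver spins for up to ~1 ms checking that the bit really reads back as 0. The same loop could be expressed with the generic iopoll helper; a sketch, assuming base is the channel's mapped register block (the driver actually reads through rcar_dmac_chan_read()):

    /* Equivalent bounded poll via <linux/iopoll.h> (atomic-safe variant). */
    u32 chcr;
    int ret = readl_poll_timeout_atomic(base + RCAR_DMACHCR, chcr,
                                        !(chcr & RCAR_DMACHCR_DE),
                                        1 /* us delay */, 1024 /* us timeout */);
    if (ret)
            dev_err(dev, "CHCR DE check error\n");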
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index b652071a2096..b106e8a60af6 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -710,7 +710,7 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
return 0;
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index d5db0f6e1ff8..4dbb30cf94ac 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -253,9 +253,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
iomem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index b9d75a54c896..9a558e30c461 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -353,7 +353,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
- if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
+ sconfig->device_fc) {
if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
return -EINVAL;
tdc->slave_id = sconfig->slave_id;
@@ -970,8 +971,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= TEGRA_APBDMA_CSR_ONCE;
+
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
@@ -1110,10 +1116,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 7df910e7c348..9272b173c746 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -54,7 +54,15 @@ struct ti_am335x_xbar_map {
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val, iomem + event);
+ /*
+ * The TPCC_EVT_MUX_60_63 register layout differs from the
+ * rest: event 63 is mapped to the lowest byte and event 60
+ * to the highest, so handle it separately.
+ */
+ if (event >= 60 && event <= 63)
+ writeb_relaxed(val, iomem + (63 - event % 4));
+ else
+ writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
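For the reversed TPCC_EVT_MUX_60_63 register, the expression 63 - event % 4 produces the mirrored byte offsets, which can be checked by hand:

    /* Offset check for events 60-63 (63 - event % 4):
     *   event 60 -> 63 - 0 = 63   (highest byte)
     *   event 61 -> 63 - 1 = 62
     *   event 62 -> 63 - 2 = 61
     *   event 63 -> 63 - 3 = 60   (lowest byte)
     */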
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 896bafb7a532..395c698edb4d 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -422,7 +422,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
break;
else {
dev_err(chan2dev(chan),
- "Couldnt allocate any descriptors\n");
+ "Couldn't allocate any descriptors\n");
return -ENOMEM;
}
}
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 545e97279083..88ad8ed2a8d6 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg)
dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
- if (dmaengine_desc_test_reuse(&vd->tx))
- list_add(&vd->node, &vc->desc_allocated);
- else
- vc->desc_free(vd);
+ vchan_vdesc_fini(vd);
dmaengine_desc_callback_invoke(&cb, NULL);
}
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 3f776a46a29c..b09b75ab0751 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -35,6 +35,7 @@ struct virt_dma_chan {
struct list_head desc_completed;
struct virt_dma_desc *cyclic;
+ struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -104,6 +105,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
}
/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
+}
+
+/**
* vchan_cyclic_callback - report the completion of a period
* @vd: virtual descriptor
*/
@@ -116,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
}
/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ /* free up stuck descriptor */
+ if (vc->vd_terminated)
+ vchan_vdesc_fini(vc->vd_terminated);
+
+ vc->vd_terminated = vd;
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
* vchan_next_desc - peek at the next descriptor to be processed
* @vc: virtual channel to obtain descriptor from
*
@@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
* Makes sure that all scheduled or active callbacks have finished running. For
* proper operation the caller has to ensure that no new callbacks are scheduled
* after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ unsigned long flags;
+
tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ if (vc->vd_terminated) {
+ vchan_vdesc_fini(vc->vd_terminated);
+ vc->vd_terminated = NULL;
+ }
+ spin_unlock_irqrestore(&vc->lock, flags);
}
#endif
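Taken together, vchan_terminate_vdesc() parks the descriptor that was on the hardware when terminate_all ran, and vchan_synchronize() frees it once the tasklet can no longer touch it. A terminate_all built on the new helper typically looks like this sketch (the foo_* names and the c->desc "currently running descriptor" pointer are hypothetical):

    static int foo_dma_terminate_all(struct dma_chan *chan)
    {
            struct foo_chan *c = to_foo_chan(chan);         /* hypothetical */
            unsigned long flags;
            LIST_HEAD(head);

            spin_lock_irqsave(&c->vc.lock, flags);
            foo_stop_hw(c);                                 /* hypothetical */
            if (c->desc) {
                    /* parked; freed later by vchan_synchronize() */
                    vchan_terminate_vdesc(&c->desc->vd);
                    c->desc = NULL;
            }
            vchan_get_all_descriptors(&c->vc, &head);
            spin_unlock_irqrestore(&c->vc.lock, flags);

            vchan_dma_desc_free_list(&c->vc, &head);
            return 0;
    }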
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5eef13380ca8..27b523530c4a 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -99,7 +99,9 @@
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
#define XILINX_DMA_REG_PARK_PTR 0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION 0x002c
/* Register Direct Mode Registers */
@@ -163,6 +165,7 @@
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
+#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
/* Multi-Channel DMA Descriptor offsets*/
@@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @pad1: Reserved @0x10
- * @pad2: Reserved @0x14
+ * @mcdma_control: Control field for mcdma @0x10
+ * @vsize_stride: Vsize and Stride field for mcdma @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw {
/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
- * @next_descmsb: Next Descriptor Pointer MSB @0x04
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
* @src_addr: Source address @0x08
- * @src_addrmsb: Source address MSB @0x0C
+ * @src_addr_msb: Source address MSB @0x0C
* @dest_addr: Destination address @0x10
- * @dest_addrmsb: Destination address MSB @0x14
+ * @dest_addr_msb: Destination address MSB @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
*/
@@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor {
* @pending_list: Descriptors waiting
* @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
+ * @free_seg_list: Free descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
* @dev: The dma device
@@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor {
* @cyclic: Check for cyclic transfers.
* @genlock: Support genlock mode
* @err: Channel has errors
+ * @idle: Check for channel idle
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor {
* @desc_submitcount: Descriptor h/w submitted count
* @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @cyclic_seg_p: Physical allocated segments base for cyclic dma
* @start_transfer: Differentiate b/w DMA IP's transfer
* @stop_transfer: Differentiate b/w DMA IP's quiesce
+ * @tdest: TDEST value for mcdma
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -341,6 +349,7 @@ struct xilinx_dma_chan {
struct list_head pending_list;
struct list_head active_list;
struct list_head done_list;
+ struct list_head free_seg_list;
struct dma_chan common;
struct dma_pool *desc_pool;
struct device *dev;
@@ -352,6 +361,7 @@ struct xilinx_dma_chan {
bool cyclic;
bool genlock;
bool err;
+ bool idle;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -360,18 +370,20 @@ struct xilinx_dma_chan {
u32 desc_submitcount;
u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
+ dma_addr_t cyclic_seg_p;
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
};
/**
- * enum xdma_ip_type: DMA IP type.
+ * enum xdma_ip_type - DMA IP type.
*
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIDMA: Axi dma ip.
+ * @XDMA_TYPE_CDMA: Axi cdma ip.
+ * @XDMA_TYPE_VDMA: Axi vdma ip.
*
*/
enum xdma_ip_type {
@@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
- struct xilinx_axidma_tx_segment *segment;
- dma_addr_t phys;
-
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
+ unsigned long flags;
- segment->phys = phys;
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_axidma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
return segment;
}
+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
/**
* xilinx_dma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
@@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
struct xilinx_axidma_tx_segment *segment)
{
- dma_pool_free(chan->desc_pool, segment, segment->phys);
+ xilinx_dma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
@@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
dev_dbg(chan->dev, "Free all channel resources.\n");
xilinx_dma_free_descriptors(chan);
+
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
- xilinx_dma_free_tx_segment(chan, chan->seg_v);
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
+ chan->seg_p);
+
+ /* Free Memory that is allocated for cyclic DMA Mode */
+ dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
+ chan->cyclic_seg_v, chan->cyclic_seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
}
- dma_pool_destroy(chan->desc_pool);
- chan->desc_pool = NULL;
}
/**
@@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ int i;
/* Has this channel already been allocated? */
if (chan->desc_pool)
@@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
* for meeting Xilinx VDMA specification requirement.
*/
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_axidma_tx_segment),
- __alignof__(struct xilinx_axidma_tx_segment),
- 0);
+ /* Allocate the buffer descriptors. */
+ chan->seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_v[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].phys = chan->seg_p +
+ sizeof(*chan->seg_v) * i;
+ list_add_tail(&chan->seg_v[i].node,
+ &chan->free_seg_list);
+ }
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
@@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
0);
}
- if (!chan->desc_pool) {
+ if (!chan->desc_pool &&
+ (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
chan->id);
@@ -855,22 +919,20 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/*
- * For AXI DMA case after submitting a pending_list, keep
- * an extra segment allocated so that the "next descriptor"
- * pointer on the tail descriptor always points to a
- * valid descriptor, even when paused after reaching taildesc.
- * This way, it is possible to issue additional
- * transfers without halting and restarting the channel.
- */
- chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
-
- /*
* For cyclic DMA mode we need to program the tail Descriptor
* register with a value which is not a part of the BD chain
* so allocating a desc segment during channel allocation for
* programming tail descriptor.
*/
- chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+ chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p, GFP_KERNEL);
+ if (!chan->cyclic_seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate desc segment for cyclic DMA\n");
+ return -ENOMEM;
+ }
+ chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
}
dma_cookie_init(dchan);
@@ -936,34 +998,10 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
}
/**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
-{
- return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_HALTED) &&
- (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
- XILINX_DMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
-{
- return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_IDLE;
-}
-
-/**
* xilinx_dma_stop_transfer - Halt DMA channel
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
/**
* xilinx_cdma_stop_transfer - Wait for the current transfer to complete
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
- u32 reg;
+ u32 reg, j;
struct xilinx_vdma_tx_segment *tail_segment;
/* This function was invoked with lock held */
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- return;
- }
-
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
@@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
else
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /* Configure channel to allow number frame buffers */
- dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
- chan->desc_pendingcount);
-
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
@@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- if (config->park && (config->park_frm >= 0) &&
- (config->park_frm < chan->num_frms)) {
- if (chan->direction == DMA_MEM_TO_DEV)
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
- else
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
}
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
@@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->has_sg) {
dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
@@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
last->hw.stride);
vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- }
- if (!chan->has_sg) {
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
chan->desc_submitcount++;
chan->desc_pendingcount--;
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
if (chan->desc_submitcount == chan->num_frms)
chan->desc_submitcount = 0;
- } else {
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
}
+
+ chan->idle = false;
}
/**
@@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
}
if (chan->has_sg) {
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
@@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+ struct xilinx_axidma_tx_segment *tail_segment;
u32 reg;
if (chan->err)
@@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (list_empty(&chan->pending_list))
return;
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
+ if (!chan->idle)
return;
- }
head_desc = list_first_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
@@ -1235,21 +1271,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_axidma_tx_segment, node);
- if (chan->has_sg && !chan->xdev->mcdma) {
- old_head = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- new_head = chan->seg_v;
- /* Copy Buffer Descriptor fields. */
- new_head->hw = old_head->hw;
-
- /* Swap and save new reserve */
- list_replace_init(&old_head->node, &new_head->node);
- chan->seg_v = old_head;
-
- tail_segment->hw.next_desc = chan->seg_v->phys;
- head_desc->async_tx.phys = new_head->phys;
- }
-
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
@@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
}
chan->err = false;
+ chan->idle = true;
+ chan->desc_submitcount = 0;
return err;
}
@@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
chan->start_transfer(chan);
spin_unlock(&chan->lock);
}
@@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_vdma_tx_segment *segment;
struct xilinx_vdma_desc_hw *hw;
if (!is_slave_direction(xt->dir))
@@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
- prev = segment;
-
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
@@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
u32 *app_w = (u32 *)context;
struct scatterlist *sg;
size_t copy;
@@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
XILINX_DMA_NUM_APP_WORDS);
}
- if (prev)
- prev->hw.next_desc = segment->phys;
-
- prev = segment;
sg_used += copy;
/*
@@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
segment = list_first_entry(&desc->segments,
struct xilinx_axidma_tx_segment, node);
desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
if (chan->direction == DMA_MEM_TO_DEV) {
@@ -1821,11 +1839,14 @@ error:
/**
* xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
* @direction: DMA direction
* @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
@@ -2009,7 +2030,9 @@ error:
/**
* xilinx_dma_terminate_all - Halt the channel and free descriptors
- * @chan: Driver specific DMA Channel pointer
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
*/
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
@@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
+ chan->idle = true;
if (chan->cyclic) {
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
@@ -2037,6 +2061,10 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
chan->cyclic = false;
}
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
return 0;
}
@@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
*
* @xdev: Driver specific device structure
* @node: Device node
+ * @chan_id: DMA Channel id
*
* Return: '0' on success and failure value on error
*/
@@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
chan->ext_addr = xdev->ext_addr;
+ /* This variable ensures that descriptors are not
+ * submitted when the DMA engine is in progress. It is
+ * maintained to avoid polling for a bit in the status
+ * register to learn the DMA state in the driver hot path.
+ */
+ chan->idle = true;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->free_seg_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
@@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
@@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
@@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
* Return: 0 always.
*/
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
- struct device_node *node) {
+ struct device_node *node)
+{
int ret, i, nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
@@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
goto error;
}
- dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
return 0;
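The allocation loop in xilinx_dma_alloc_chan_resources() links the coherent segment array into a ring, so the "next descriptor" pointer of the tail BD is always valid even when the channel pauses at taildesc. Restating the address math for entry i (sketch):

    /* Segment i sits at seg_p + i * sizeof(*seg_v); its next_desc wraps: */
    dma_addr_t next = chan->seg_p +
                      sizeof(*chan->seg_v) * ((i + 1) % XILINX_DMA_NUM_DESCS);
    chan->seg_v[i].hw.next_desc     = lower_32_bits(next);
    chan->seg_v[i].hw.next_desc_msb = upper_32_bits(next);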
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 1ee1241ca797..f14645817ed8 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pm_runtime.h>
#include "../dmaengine.h"
@@ -47,6 +48,7 @@
#define ZYNQMP_DMA_SRC_START_MSB 0x15C
#define ZYNQMP_DMA_DST_START_LSB 0x160
#define ZYNQMP_DMA_DST_START_MSB 0x164
+#define ZYNQMP_DMA_TOTAL_BYTE 0x188
#define ZYNQMP_DMA_RATE_CTRL 0x18C
#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
@@ -138,6 +140,8 @@
#define ZYNQMP_DMA_BUS_WIDTH_64 64
#define ZYNQMP_DMA_BUS_WIDTH_128 128
+#define ZDMA_PM_TIMEOUT 100
+
#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
@@ -211,8 +215,6 @@ struct zynqmp_dma_desc_sw {
* @bus_width: Bus width
* @src_burst_len: Source burst length
* @dst_burst_len: Dest burst length
- * @clk_main: Pointer to main clock
- * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_chan {
struct zynqmp_dma_device *zdev;
@@ -237,8 +239,6 @@ struct zynqmp_dma_chan {
u32 bus_width;
u32 src_burst_len;
u32 dst_burst_len;
- struct clk *clk_main;
- struct clk *clk_apb;
};
/**
@@ -246,11 +246,15 @@ struct zynqmp_dma_chan {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_device {
struct device *dev;
struct dma_device common;
struct zynqmp_dma_chan *chan;
+ struct clk *clk_main;
+ struct clk *clk_apb;
};
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
@@ -461,7 +465,11 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
struct zynqmp_dma_desc_sw *desc;
- int i;
+ int i, ret;
+
+ ret = pm_runtime_get_sync(chan->dev);
+ if (ret < 0)
+ return ret;
chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
GFP_KERNEL);
@@ -506,6 +514,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
chan->idle = false;
writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}
@@ -517,12 +526,12 @@ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
*/
static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
{
- u32 val;
-
+ if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
@@ -545,6 +554,8 @@ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
* zynqmp_dma_device_config - Zynqmp dma device configuration
* @dchan: DMA channel
* @config: DMA device config
+ *
+ * Return: 0 always
*/
static int zynqmp_dma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
@@ -640,7 +651,7 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
/**
* zynqmp_dma_free_descriptors - Free channel descriptors
- * @dchan: DMA channel pointer
+ * @chan: ZynqMP DMA channel pointer
*/
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
@@ -664,6 +675,8 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
chan->desc_pool_v, chan->desc_pool_p);
kfree(chan->sw_desc_pool);
+ pm_runtime_mark_last_busy(chan->dev);
+ pm_runtime_put_autosuspend(chan->dev);
}
/**
@@ -715,7 +728,7 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
if (status & ZYNQMP_DMA_INT_OVRFL) {
zynqmp_dma_handle_ovfl_int(chan, status);
- dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+ dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
ret = IRQ_HANDLED;
}
@@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
if (!chan)
return;
- devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ if (chan->irq)
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
tasklet_kill(&chan->tasklet);
list_del(&chan->common.device_node);
- clk_disable_unprepare(chan->clk_apb);
- clk_disable_unprepare(chan->clk_main);
}
/**
@@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
"zynqmp-dma", chan);
if (err)
return err;
- chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
- if (IS_ERR(chan->clk_main)) {
- dev_err(&pdev->dev, "main clock not found.\n");
- return PTR_ERR(chan->clk_main);
- }
-
- chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
- if (IS_ERR(chan->clk_apb)) {
- dev_err(&pdev->dev, "apb clock not found.\n");
- return PTR_ERR(chan->clk_apb);
- }
-
- err = clk_prepare_enable(chan->clk_main);
- if (err) {
- dev_err(&pdev->dev, "Unable to enable main clock.\n");
- return err;
- }
-
- err = clk_prepare_enable(chan->clk_apb);
- if (err) {
- clk_disable_unprepare(chan->clk_main);
- dev_err(&pdev->dev, "Unable to enable apb clock.\n");
- return err;
- }
chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
chan->idle = true;
@@ -953,6 +941,87 @@ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
}
/**
+ * zynqmp_dma_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_resume(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(zdev->clk_main);
+ clk_disable_unprepare(zdev->clk_apb);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_resume - Runtime resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Re-enable the device clocks after runtime suspend.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(zdev->clk_main);
+ if (err) {
+ dev_err(dev, "Unable to enable main clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(zdev->clk_apb);
+ if (err) {
+ dev_err(dev, "Unable to enable apb clock.\n");
+ clk_disable_unprepare(zdev->clk_main);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
+ SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
+ zynqmp_dma_runtime_resume, NULL)
+};
+
+/**
* zynqmp_dma_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
@@ -984,12 +1053,33 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p->device_config = zynqmp_dma_device_config;
p->dev = &pdev->dev;
+ zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+ if (IS_ERR(zdev->clk_main)) {
+ dev_err(&pdev->dev, "main clock not found.\n");
+ return PTR_ERR(zdev->clk_main);
+ }
+
+ zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+ if (IS_ERR(zdev->clk_apb)) {
+ dev_err(&pdev->dev, "apb clock not found.\n");
+ return PTR_ERR(zdev->clk_apb);
+ }
+
platform_set_drvdata(pdev, zdev);
+ pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(zdev->dev);
+ pm_runtime_enable(zdev->dev);
+ pm_runtime_get_sync(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev)) {
+ ret = zynqmp_dma_runtime_resume(zdev->dev);
+ if (ret)
+ return ret;
+ }
ret = zynqmp_dma_chan_probe(zdev, pdev);
if (ret) {
dev_err(&pdev->dev, "Probing channel failed\n");
- goto free_chan_resources;
+ goto err_disable_pm;
}
p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
@@ -1005,12 +1095,19 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
goto free_chan_resources;
}
+ pm_runtime_mark_last_busy(zdev->dev);
+ pm_runtime_put_sync_autosuspend(zdev->dev);
+
dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
return 0;
free_chan_resources:
zynqmp_dma_chan_remove(zdev->chan);
+err_disable_pm:
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
return ret;
}
@@ -1028,6 +1125,9 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
+ pm_runtime_disable(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
return 0;
}
@@ -1042,6 +1142,7 @@ static struct platform_driver zynqmp_dma_driver = {
.driver = {
.name = "xilinx-zynqmp-dma",
.of_match_table = zynqmp_dma_of_match,
+ .pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
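With clock handling moved into the runtime PM callbacks, the channel code brackets hardware use with the usual autosuspend pattern; a sketch of that sequence (dev is the zdev device):

    /* Runtime PM autosuspend bracket used around hardware accesses (sketch). */
    ret = pm_runtime_get_sync(dev);     /* runtime_resume: clocks back on */
    if (ret < 0)
            return ret;

    /* ... touch the DMA hardware ... */

    pm_runtime_mark_last_busy(dev);     /* restart the autosuspend timer */
    pm_runtime_put_autosuspend(dev);    /* clocks gate after ZDMA_PM_TIMEOUT ms */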
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 96afb2aeed18..3c4017007647 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -457,4 +457,11 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_TI
+ tristate "Texas Instruments DDR3 ECC Controller"
+ depends on ARCH_KEYSTONE || SOC_DRA7XX
+ help
+ Support for error detection and correction on the
+ TI Keystone and DRA7xx SoCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 0fd9ffa63299..b54912eb39af 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_TI) += ti_edac.o
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index ec5d695bbb72..3c68bb525d5d 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 9c1ffe3e912b..aeb222ca3ed1 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
+ int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
new file mode 100644
index 000000000000..6ac26d1b929f
--- /dev/null
+++ b/drivers/edac/ti_edac.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Texas Instruments DDR3 ECC error correction and detection driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+#include "edac_module.h"
+
+/* EMIF controller registers */
+#define EMIF_SDRAM_CONFIG 0x008
+#define EMIF_IRQ_STATUS 0x0ac
+#define EMIF_IRQ_ENABLE_SET 0x0b4
+#define EMIF_ECC_CTRL 0x110
+#define EMIF_1B_ECC_ERR_CNT 0x130
+#define EMIF_1B_ECC_ERR_THRSH 0x134
+#define EMIF_1B_ECC_ERR_ADDR_LOG 0x13c
+#define EMIF_2B_ECC_ERR_ADDR_LOG 0x140
+
+/* Bit definitions for EMIF_SDRAM_CONFIG */
+#define SDRAM_TYPE_SHIFT 29
+#define SDRAM_TYPE_MASK GENMASK(31, 29)
+#define SDRAM_TYPE_DDR3 (3 << SDRAM_TYPE_SHIFT)
+#define SDRAM_TYPE_DDR2 (2 << SDRAM_TYPE_SHIFT)
+#define SDRAM_NARROW_MODE_MASK GENMASK(15, 14)
+#define SDRAM_K2_NARROW_MODE_SHIFT 12
+#define SDRAM_K2_NARROW_MODE_MASK GENMASK(13, 12)
+#define SDRAM_ROWSIZE_SHIFT 7
+#define SDRAM_ROWSIZE_MASK GENMASK(9, 7)
+#define SDRAM_IBANK_SHIFT 4
+#define SDRAM_IBANK_MASK GENMASK(6, 4)
+#define SDRAM_K2_IBANK_SHIFT 5
+#define SDRAM_K2_IBANK_MASK GENMASK(6, 5)
+#define SDRAM_K2_EBANK_SHIFT 3
+#define SDRAM_K2_EBANK_MASK BIT(SDRAM_K2_EBANK_SHIFT)
+#define SDRAM_PAGESIZE_SHIFT 0
+#define SDRAM_PAGESIZE_MASK GENMASK(2, 0)
+#define SDRAM_K2_PAGESIZE_SHIFT 0
+#define SDRAM_K2_PAGESIZE_MASK GENMASK(1, 0)
+
+#define EMIF_1B_ECC_ERR_THRSH_SHIFT 24
+
+/* IRQ bit definitions */
+#define EMIF_1B_ECC_ERR BIT(5)
+#define EMIF_2B_ECC_ERR BIT(4)
+#define EMIF_WR_ECC_ERR BIT(3)
+#define EMIF_SYS_ERR BIT(0)
+/* Bit 31 enables ECC and 28 enables RMW */
+#define ECC_ENABLED (BIT(31) | BIT(28))
+
+#define EDAC_MOD_NAME "ti-emif-edac"
+
+enum {
+ EMIF_TYPE_DRA7,
+ EMIF_TYPE_K2
+};
+
+struct ti_edac {
+ void __iomem *reg;
+};
+
+static u32 ti_edac_readl(struct ti_edac *edac, u16 offset)
+{
+ return readl_relaxed(edac->reg + offset);
+}
+
+static void ti_edac_writel(struct ti_edac *edac, u32 val, u16 offset)
+{
+ writel_relaxed(val, edac->reg + offset);
+}
+
+static irqreturn_t ti_edac_isr(int irq, void *data)
+{
+ struct mem_ctl_info *mci = data;
+ struct ti_edac *edac = mci->pvt_info;
+ u32 irq_status;
+ u32 err_addr;
+ int err_count;
+
+ irq_status = ti_edac_readl(edac, EMIF_IRQ_STATUS);
+
+ if (irq_status & EMIF_1B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_1B_ECC_ERR_ADDR_LOG);
+ err_count = ti_edac_readl(edac, EMIF_1B_ECC_ERR_CNT);
+ ti_edac_writel(edac, err_count, EMIF_1B_ECC_ERR_CNT);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "1B");
+ }
+
+ if (irq_status & EMIF_2B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_2B_ECC_ERR_ADDR_LOG);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "2B");
+ }
+
+ if (irq_status & EMIF_WR_ECC_ERR)
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ 0, 0, -1, 0, 0, 0,
+ mci->ctl_name, "WR");
+
+ ti_edac_writel(edac, irq_status, EMIF_IRQ_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
+{
+ struct dimm_info *dimm;
+ struct ti_edac *edac = mci->pvt_info;
+ int bits;
+ u32 val;
+ u32 memsize;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);
+
+ val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
+
+ if (type == EMIF_TYPE_DRA7) {
+ bits = ((val & SDRAM_PAGESIZE_MASK) >> SDRAM_PAGESIZE_SHIFT) + 8;
+ bits += ((val & SDRAM_ROWSIZE_MASK) >> SDRAM_ROWSIZE_SHIFT) + 9;
+ bits += (val & SDRAM_IBANK_MASK) >> SDRAM_IBANK_SHIFT;
+
+ if (val & SDRAM_NARROW_MODE_MASK) {
+ bits++;
+ dimm->dtype = DEV_X16;
+ } else {
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ }
+ } else {
+ bits = 16;
+ bits += ((val & SDRAM_K2_PAGESIZE_MASK) >>
+ SDRAM_K2_PAGESIZE_SHIFT) + 8;
+ bits += (val & SDRAM_K2_IBANK_MASK) >> SDRAM_K2_IBANK_SHIFT;
+ bits += (val & SDRAM_K2_EBANK_MASK) >> SDRAM_K2_EBANK_SHIFT;
+
+ val = (val & SDRAM_K2_NARROW_MODE_MASK) >>
+ SDRAM_K2_NARROW_MODE_SHIFT;
+ switch (val) {
+ case 0:
+ bits += 3;
+ dimm->dtype = DEV_X64;
+ break;
+ case 1:
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ break;
+ case 2:
+ bits++;
+ dimm->dtype = DEV_X16;
+ break;
+ }
+ }
+
+ memsize = 1 << bits;
+
+ dimm->nr_pages = memsize >> PAGE_SHIFT;
+ dimm->grain = 4;
+ if ((val & SDRAM_TYPE_MASK) == SDRAM_TYPE_DDR2)
+ dimm->mtype = MEM_DDR2;
+ else
+ dimm->mtype = MEM_DDR3;
+
+ val = ti_edac_readl(edac, EMIF_ECC_CTRL);
+ if (val & ECC_ENABLED)
+ dimm->edac_mode = EDAC_SECDED;
+ else
+ dimm->edac_mode = EDAC_NONE;
+}
+
+static const struct of_device_id ti_edac_of_match[] = {
+ { .compatible = "ti,emif-keystone", .data = (void *)EMIF_TYPE_K2 },
+ { .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_edac_of_match);
+
+static int _emif_get_id(struct device_node *node)
+{
+ struct device_node *np;
+ const __be32 *addrp;
+ u32 addr, my_addr;
+ int my_id = 0;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ my_addr = (u32)of_translate_address(node, addrp);
+
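+	/*
+	 * The instance ID is simply the number of other matching EMIF
+	 * nodes that sit at lower bus addresses.
+	 */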
+ for_each_matching_node(np, ti_edac_of_match) {
+ if (np == node)
+ continue;
+
+ addrp = of_get_address(np, 0, NULL, NULL);
+ addr = (u32)of_translate_address(np, addrp);
+
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "addr=%x, my_addr=%x\n",
+ addr, my_addr);
+
+ if (addr < my_addr)
+ my_id++;
+ }
+
+ return my_id;
+}
+
+static int ti_edac_probe(struct platform_device *pdev)
+{
+ int error_irq = 0, ret = -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *reg;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ const struct of_device_id *id;
+ struct ti_edac *edac;
+ int emif_id;
+
+ id = of_match_device(ti_edac_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg)) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF controller regs not defined\n");
+ return PTR_ERR(reg);
+ }
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = 1;
+
+ /* Allocate ID number for our EMIF controller */
+ emif_id = _emif_get_id(pdev->dev.of_node);
+ if (emif_id < 0)
+ return -EINVAL;
+
+ mci = edac_mc_alloc(emif_id, 1, layers, sizeof(*edac));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ edac = mci->pvt_info;
+ edac->reg = reg;
+ platform_set_drvdata(pdev, mci);
+
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_NONE;
+ mci->mod_name = EDAC_MOD_NAME;
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ /* Setup memory layout */
+	ti_edac_setup_dimm(mci, (u32)(uintptr_t)id->data);
+
+ /* add EMIF ECC error handler */
+ error_irq = platform_get_irq(pdev, 0);
+	if (error_irq < 0) {
+		ret = error_irq;
+		edac_printk(KERN_ERR, EDAC_MOD_NAME,
+			    "EMIF irq number not defined.\n");
+		goto err;
+ }
+
+ ret = devm_request_irq(dev, error_irq, ti_edac_isr, 0,
+ "emif-edac-irq", mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "request_irq fail for EMIF EDAC irq\n");
+ goto err;
+ }
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "Failed to register mci: %d.\n", ret);
+ goto err;
+ }
+
+ /* Generate an interrupt with each 1b error */
+ ti_edac_writel(edac, 1 << EMIF_1B_ECC_ERR_THRSH_SHIFT,
+ EMIF_1B_ECC_ERR_THRSH);
+
+ /* Enable interrupts */
+ ti_edac_writel(edac,
+ EMIF_1B_ECC_ERR | EMIF_2B_ECC_ERR | EMIF_WR_ECC_ERR,
+ EMIF_IRQ_ENABLE_SET);
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+ return ret;
+}
+
+static int ti_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver ti_edac_driver = {
+ .probe = ti_edac_probe,
+ .remove = ti_edac_remove,
+ .driver = {
+ .name = EDAC_MOD_NAME,
+ .of_match_table = ti_edac_of_match,
+ },
+};
+
+module_platform_driver(ti_edac_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Texas Instruments DDR3 MC");
+MODULE_LICENSE("GPL v2");
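For reference, the size computation in ti_edac_setup_dimm() above is plain
address-bit accounting: column, row, bank, and bus-width bits are summed and
capacity is 2^bits. A standalone sketch of the DRA7 branch with made-up field
values (the names mirror the SDRAM_* fields; nothing here is part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pagesize = 2;	/* hypothetical SDRAM_PAGESIZE field */
		unsigned int rowsize = 6;	/* hypothetical SDRAM_ROWSIZE field */
		unsigned int ibank = 3;		/* hypothetical SDRAM_IBANK field */
		int narrow_mode = 0;		/* 0 => x32 bus, 1 => x16 */
		int bits;

		bits = (pagesize + 8) + (rowsize + 9) + ibank;
		bits += narrow_mode ? 1 : 2;	/* bus-width bits */

		/* 10 + 15 + 3 + 2 = 30 address bits => 1 GiB */
		printf("total size: %u MiB\n", (1u << bits) >> 20);
		return 0;
	}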
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 981fba56bc18..1621f2f7f129 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -24,8 +24,6 @@
#include <linux/notifier.h>
#include <linux/extcon-provider.h>
#include <linux/regmap.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/mfd/axp20x.h>
/* Power source status register */
@@ -79,11 +77,6 @@ enum axp288_extcon_reg {
AXP288_BC_DET_STAT_REG = 0x2f,
};
-enum axp288_mux_select {
- EXTCON_GPIO_MUX_SEL_PMIC = 0,
- EXTCON_GPIO_MUX_SEL_SOC,
-};
-
enum axp288_extcon_irq {
VBUS_FALLING_IRQ = 0,
VBUS_RISING_IRQ,
@@ -104,10 +97,8 @@ struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- struct gpio_desc *gpio_mux_cntl;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
- struct notifier_block extcon_nb;
unsigned int previous_cable;
};
@@ -197,15 +188,6 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
}
no_vbus:
- /*
- * If VBUS is absent Connect D+/D- lines to PMIC for BC
- * detection. Else connect them to SOC for USB communication.
- */
- if (info->gpio_mux_cntl)
- gpiod_set_value(info->gpio_mux_cntl,
- vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
- : EXTCON_GPIO_MUX_SEL_PMIC);
-
extcon_set_state_sync(info->edev, info->previous_cable, false);
if (info->previous_cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -253,8 +235,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
- int ret, i, pirq, gpio;
+ int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -264,8 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->previous_cable = EXTCON_NONE;
- if (pdata)
- info->gpio_mux_cntl = pdata->gpio_mux_cntl;
platform_set_drvdata(pdev, info);
@@ -286,21 +265,11 @@ static int axp288_extcon_probe(struct platform_device *pdev)
return ret;
}
- /* Set up gpio control for USB Mux */
- if (info->gpio_mux_cntl) {
- gpio = desc_to_gpio(info->gpio_mux_cntl);
- ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to request the gpio=%d\n", gpio);
- return ret;
- }
- gpiod_direction_output(info->gpio_mux_cntl,
- EXTCON_GPIO_MUX_SEL_PMIC);
- }
-
for (i = 0; i < EXTCON_IRQ_END; i++) {
pirq = platform_get_irq(pdev, i);
+ if (pirq < 0)
+ return pirq;
+
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_err(&pdev->dev,
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 6187f731b29d..6721ab01fe7d 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -34,16 +34,26 @@ struct cros_ec_extcon_info {
struct notifier_block notifier;
+ unsigned int dr; /* data role */
+ bool pr; /* power role (true if VBUS enabled) */
bool dp; /* DisplayPort enabled */
bool mux; /* SuperSpeed (usb3) enabled */
unsigned int power_type;
};
static const unsigned int usb_type_c_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
EXTCON_DISP_DP,
EXTCON_NONE,
};
+enum usb_data_roles {
+ DR_NONE,
+ DR_HOST,
+ DR_DEVICE,
+};
+
/**
* cros_ec_pd_command() - Send a command to the EC.
* @info: pointer to struct cros_ec_extcon_info
@@ -150,6 +160,7 @@ static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
pd_control.port = info->port_id;
pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ pd_control.swap = USB_PD_CTRL_SWAP_NONE;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
&pd_control, sizeof(pd_control),
&resp, sizeof(resp));
@@ -183,11 +194,72 @@ static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
return resp.num_ports;
}
+static const char *cros_ec_usb_role_string(unsigned int role)
+{
+ return role == DR_NONE ? "DISCONNECTED" :
+ (role == DR_HOST ? "DFP" : "UFP");
+}
+
+static const char *cros_ec_usb_power_type_string(unsigned int type)
+{
+ switch (type) {
+ case USB_CHG_TYPE_NONE:
+ return "USB_CHG_TYPE_NONE";
+ case USB_CHG_TYPE_PD:
+ return "USB_CHG_TYPE_PD";
+ case USB_CHG_TYPE_PROPRIETARY:
+ return "USB_CHG_TYPE_PROPRIETARY";
+ case USB_CHG_TYPE_C:
+ return "USB_CHG_TYPE_C";
+ case USB_CHG_TYPE_BC12_DCP:
+ return "USB_CHG_TYPE_BC12_DCP";
+ case USB_CHG_TYPE_BC12_CDP:
+ return "USB_CHG_TYPE_BC12_CDP";
+ case USB_CHG_TYPE_BC12_SDP:
+ return "USB_CHG_TYPE_BC12_SDP";
+ case USB_CHG_TYPE_OTHER:
+ return "USB_CHG_TYPE_OTHER";
+ case USB_CHG_TYPE_VBUS:
+ return "USB_CHG_TYPE_VBUS";
+ case USB_CHG_TYPE_UNKNOWN:
+ return "USB_CHG_TYPE_UNKNOWN";
+ default:
+ return "USB_CHG_TYPE_UNKNOWN";
+ }
+}
+
+static bool cros_ec_usb_power_type_is_wall_wart(unsigned int type,
+ unsigned int role)
+{
+ switch (type) {
+	/*
+	 * FIXME: Guppy, Donnettes, and other chargers will be miscategorized
+	 * because they identify with USB_CHG_TYPE_C, but we can't return true
+	 * here from that code because that breaks Suzy-Q and other kinds of
+	 * USB Type-C cables and peripherals.
+	 */
+ case USB_CHG_TYPE_PROPRIETARY:
+ case USB_CHG_TYPE_BC12_DCP:
+ return true;
+ case USB_CHG_TYPE_PD:
+ case USB_CHG_TYPE_C:
+ case USB_CHG_TYPE_BC12_CDP:
+ case USB_CHG_TYPE_BC12_SDP:
+ case USB_CHG_TYPE_OTHER:
+ case USB_CHG_TYPE_VBUS:
+ case USB_CHG_TYPE_UNKNOWN:
+ case USB_CHG_TYPE_NONE:
+ default:
+ return false;
+ }
+}
+
static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
bool force)
{
struct device *dev = info->dev;
int role, power_type;
+ unsigned int dr = DR_NONE;
+ bool pr = false;
bool polarity = false;
bool dp = false;
bool mux = false;
@@ -206,9 +278,12 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dev_err(dev, "failed getting role err = %d\n", role);
return role;
}
+ dev_dbg(dev, "disconnected\n");
} else {
int pd_mux_state;
+ dr = (role & PD_CTRL_RESP_ROLE_DATA) ? DR_HOST : DR_DEVICE;
+ pr = (role & PD_CTRL_RESP_ROLE_POWER);
pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
if (pd_mux_state < 0)
pd_mux_state = USB_PD_MUX_USB_ENABLED;
@@ -216,20 +291,62 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
- }
- if (force || info->dp != dp || info->mux != mux ||
- info->power_type != power_type) {
+ dev_dbg(dev,
+ "connected role 0x%x pwr type %d dr %d pr %d pol %d mux %d dp %d hpd %d\n",
+ role, power_type, dr, pr, polarity, mux, dp, hpd);
+ }
+ /*
+ * When there is no USB host (e.g. USB PD charger),
+ * we are not really a UFP for the AP.
+ */
+ if (dr == DR_DEVICE &&
+ cros_ec_usb_power_type_is_wall_wart(power_type, role))
+ dr = DR_NONE;
+
+ if (force || info->dr != dr || info->pr != pr || info->dp != dp ||
+ info->mux != mux || info->power_type != power_type) {
+ bool host_connected = false, device_connected = false;
+
+ dev_dbg(dev, "Type/Role switch! type = %s role = %s\n",
+ cros_ec_usb_power_type_string(power_type),
+ cros_ec_usb_role_string(dr));
+ info->dr = dr;
+ info->pr = pr;
info->dp = dp;
info->mux = mux;
info->power_type = power_type;
- extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ if (dr == DR_DEVICE)
+ device_connected = true;
+ else if (dr == DR_HOST)
+ host_connected = true;
+ extcon_set_state(info->edev, EXTCON_USB, device_connected);
+ extcon_set_state(info->edev, EXTCON_USB_HOST, host_connected);
+ extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS,
(union extcon_property_value)(int)mux);
@@ -237,6 +354,8 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
EXTCON_PROP_DISP_HPD,
(union extcon_property_value)(int)hpd);
+ extcon_sync(info->edev, EXTCON_USB);
+ extcon_sync(info->edev, EXTCON_USB_HOST);
extcon_sync(info->edev, EXTCON_DISP_DP);
} else if (hpd) {
@@ -322,13 +441,28 @@ static int extcon_cros_ec_probe(struct platform_device *pdev)
return ret;
}
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_DISP_HPD);
+ info->dr = DR_NONE;
+ info->pr = false;
+
platform_set_drvdata(pdev, info);
/* Get PD events from the EC */
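Consumers see these cables through the standard extcon API. A sketch of what a
USB role driver might do with the states and properties set above; the function
and flow are hypothetical, only the extcon calls are the stock <linux/extcon.h>
ones:

	#include <linux/extcon.h>
	#include <linux/printk.h>

	static void example_sync_role(struct extcon_dev *edev)
	{
		union extcon_property_value prop;

		if (extcon_get_state(edev, EXTCON_USB_HOST) == 1) {
			/* EC reports DFP: check SuperSpeed routing first */
			if (!extcon_get_property(edev, EXTCON_USB_HOST,
						 EXTCON_PROP_USB_SS, &prop))
				pr_info("host role, USB3 mux %s\n",
					prop.intval ? "enabled" : "disabled");
		} else if (extcon_get_state(edev, EXTCON_USB) == 1) {
			pr_info("device role\n");
		}
	}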
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index a301fcf46e88..523391bb3fbe 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1784,10 +1784,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
+static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
{
struct client *client = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &client->wait, pt);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 180f0a96528c..fee2e9e7ea20 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -328,11 +328,11 @@ nosy_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int
+static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
struct client *client = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &client->buffer.wait, pt);
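Both hunks above are part of the tree-wide conversion of poll methods from
unsigned int to the sparse-checkable __poll_t type. The resulting shape,
sketched against a hypothetical device (example_dev and its fields are
invented for illustration):

	#include <linux/fs.h>
	#include <linux/poll.h>
	#include <linux/wait.h>

	struct example_dev {
		wait_queue_head_t wait;
		bool data_ready;
	};

	static __poll_t example_poll(struct file *file, poll_table *pt)
	{
		struct example_dev *dev = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &dev->wait, pt);
		if (dev->data_ready)
			mask |= EPOLLIN | EPOLLRDNORM;

		return mask;
	}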
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index fa87a055905e..e77f77caa0f3 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -48,6 +48,14 @@ config ARM_SCPI_POWER_DOMAIN
This enables support for the SCPI power domains which can be
enabled or disabled via the SCP firmware
+config ARM_SDE_INTERFACE
+ bool "ARM Software Delegated Exception Interface (SDEI)"
+ depends on ARM64
+ help
+ The Software Delegated Exception Interface (SDEI) is an ARM
+ standard for registering callbacks from the platform firmware
+ into the OS. This is typically used to implement RAS notifications.
+
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index feaa890197f3..b248238ddc6a 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
new file mode 100644
index 000000000000..1ea71640fdc2
--- /dev/null
+++ b/drivers/firmware/arm_sdei.c
@@ -0,0 +1,1092 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+/*
+ * The call to use to reach the firmware.
+ */
+static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res);
+
+/* entry point from firmware to arch asm code */
+static unsigned long sdei_entry_point;
+
+struct sdei_event {
+ /* These three are protected by the sdei_list_lock */
+ struct list_head list;
+ bool reregister;
+ bool reenable;
+
+ u32 event_num;
+ u8 type;
+ u8 priority;
+
+ /* This pointer is handed to firmware as the event argument. */
+ union {
+ /* Shared events */
+ struct sdei_registered_event *registered;
+
+ /* CPU private events */
+ struct sdei_registered_event __percpu *private_registered;
+ };
+};
+
+/* Take this mutex for any API call or list modification; take it before sdei_list_lock. */
+static DEFINE_MUTEX(sdei_events_lock);
+
+/* and then hold this when modifying the list */
+static DEFINE_SPINLOCK(sdei_list_lock);
+static LIST_HEAD(sdei_list);
+
+/* Private events are registered/enabled via IPI passing one of these */
+struct sdei_crosscall_args {
+ struct sdei_event *event;
+ atomic_t errors;
+ int first_error;
+};
+
+#define CROSSCALL_INIT(arg, ev)	((arg).event = (ev), \
+					 (arg).first_error = 0, \
+					 atomic_set(&(arg).errors, 0))
+
+static inline int sdei_do_cross_call(smp_call_func_t fn, struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ on_each_cpu(fn, &arg, true);
+
+ return arg.first_error;
+}
+
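+/* Only the first error is reported back; later failures just bump the count. */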
+static inline void
+sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
+{
+ if (err && (atomic_inc_return(&arg->errors) == 1))
+ arg->first_error = err;
+}
+
+static int sdei_to_linux_errno(unsigned long sdei_err)
+{
+ switch (sdei_err) {
+ case SDEI_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case SDEI_INVALID_PARAMETERS:
+ return -EINVAL;
+ case SDEI_DENIED:
+ return -EPERM;
+ case SDEI_PENDING:
+ return -EINPROGRESS;
+ case SDEI_OUT_OF_RESOURCE:
+ return -ENOMEM;
+ }
+
+ /* Not an error value ... */
+ return sdei_err;
+}
+
+/*
+ * If x0 is any of these values, the call failed; use sdei_to_linux_errno()
+ * to translate.
+ */
+static bool sdei_is_err(struct arm_smccc_res *res)
+{
+ switch (res->a0) {
+ case SDEI_NOT_SUPPORTED:
+ case SDEI_INVALID_PARAMETERS:
+ case SDEI_DENIED:
+ case SDEI_PENDING:
+ case SDEI_OUT_OF_RESOURCE:
+ return true;
+ }
+
+ return false;
+}
+
+static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ u64 *result)
+{
+ int err = 0;
+ struct arm_smccc_res res;
+
+ if (sdei_firmware_call) {
+ sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
+ &res);
+ if (sdei_is_err(&res))
+ err = sdei_to_linux_errno(res.a0);
+ } else {
+ /*
+ * !sdei_firmware_call means we failed to probe or called
+ * sdei_mark_interface_broken(). -EIO is not an error returned
+ * by sdei_to_linux_errno() and is used to suppress messages
+ * from this driver.
+ */
+ err = -EIO;
+ res.a0 = SDEI_NOT_SUPPORTED;
+ }
+
+ if (result)
+ *result = res.a0;
+
+ return err;
+}
+
+static struct sdei_event *sdei_event_find(u32 event_num)
+{
+ struct sdei_event *e, *found = NULL;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(e, &sdei_list, list) {
+ if (e->event_num == event_num) {
+ found = e;
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return found;
+}
+
+int sdei_api_event_context(u32 query, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
+ result);
+}
+NOKPROBE_SYMBOL(sdei_api_event_context);
+
+static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
+ 0, 0, result);
+}
+
+static struct sdei_event *sdei_event_create(u32 event_num,
+ sdei_event_callback *cb,
+ void *cb_arg)
+{
+ int err;
+ u64 result;
+ struct sdei_event *event;
+ struct sdei_registered_event *reg;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&event->list);
+ event->event_num = event_num;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->priority = result;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->type = result;
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ reg->event_num = event_num;
+ reg->priority = event->priority;
+
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ event->registered = reg;
+ } else {
+ int cpu;
+ struct sdei_registered_event __percpu *regs;
+
+ regs = alloc_percpu(struct sdei_registered_event);
+ if (!regs) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_possible_cpu(cpu) {
+ reg = per_cpu_ptr(regs, cpu);
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ }
+
+ event->private_registered = regs;
+ }
+
+ if (sdei_event_find(event_num)) {
+		if (event->type == SDEI_EVENT_TYPE_SHARED)
+			kfree(event->registered);
+		else
+			free_percpu(event->private_registered);
+ kfree(event);
+ event = ERR_PTR(-EBUSY);
+ } else {
+ spin_lock(&sdei_list_lock);
+ list_add(&event->list, &sdei_list);
+ spin_unlock(&sdei_list_lock);
+ }
+
+ return event;
+}
+
+static void sdei_event_destroy(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_del(&event->list);
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ kfree(event->registered);
+ else
+ free_percpu(event->private_registered);
+
+ kfree(event);
+}
+
+static int sdei_api_get_version(u64 *version)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
+}
+
+int sdei_mask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to mask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_mask_cpu(void *ignored)
+{
+ sdei_mask_local_cpu();
+}
+
+int sdei_unmask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to unmask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_unmask_cpu(void *ignored)
+{
+ sdei_unmask_local_cpu();
+}
+
+static void _ipi_private_reset(void *ignored)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ NULL);
+ if (err && err != -EIO)
+ pr_warn_once("failed to reset CPU[%u]: %d\n",
+ smp_processor_id(), err);
+}
+
+static int sdei_api_shared_reset(void)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
+ NULL);
+}
+
+static void sdei_mark_interface_broken(void)
+{
+ pr_err("disabling SDEI firmware interface\n");
+ on_each_cpu(&_ipi_mask_cpu, NULL, true);
+ sdei_firmware_call = NULL;
+}
+
+static int sdei_platform_reset(void)
+{
+ int err;
+
+ on_each_cpu(&_ipi_private_reset, NULL, true);
+ err = sdei_api_shared_reset();
+ if (err) {
+ pr_err("Failed to reset platform: %d\n", err);
+ sdei_mark_interface_broken();
+ }
+
+ return err;
+}
+
+static int sdei_api_event_enable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
+ 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_enable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_enable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_enable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_enable);
+
+static int sdei_api_event_disable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
+ 0, 0, NULL);
+}
+
+static void _ipi_event_disable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_disable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_disable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_disable(event->event_num);
+ else
+ err = sdei_do_cross_call(_ipi_event_disable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_disable);
+
+static int sdei_api_event_unregister(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
+ 0, 0, 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_unregister(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_unregister(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_unregister(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_unregister(event->event_num);
+
+ return sdei_do_cross_call(_local_event_unregister, event);
+}
+
+int sdei_event_unregister(u32 event_num)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ do {
+ if (!event) {
+ pr_warn("Event %u not registered\n", event_num);
+ err = -ENOENT;
+ break;
+ }
+
+ err = _sdei_event_unregister(event);
+ if (err)
+ break;
+
+ sdei_event_destroy(event);
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_unregister);
+
+/*
+ * Unregister shared events, but don't destroy them: they are re-registered
+ * by sdei_reregister_shared() when resuming from hibernate.
+ */
+static int sdei_unregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+	mutex_lock(&sdei_events_lock);
+	/*
+	 * The list is only modified with both sdei_events_lock and
+	 * sdei_list_lock held, so the mutex alone is enough to walk it.
+	 * _sdei_event_unregister() takes sdei_list_lock itself; taking it
+	 * here as well would self-deadlock.
+	 */
+	list_for_each_entry(event, &sdei_list, list) {
+		if (event->type != SDEI_EVENT_TYPE_SHARED)
+			continue;
+
+		err = _sdei_event_unregister(event);
+		if (err)
+			break;
+	}
+	mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
+ void *arg, u64 flags, u64 affinity)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
+ (unsigned long)entry_point, (unsigned long)arg,
+ flags, affinity, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_register(void *data)
+{
+ int err;
+ struct sdei_registered_event *reg;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON(preemptible());
+
+ reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ reg, 0, 0);
+
+ sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_register(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_register(event->event_num,
+ sdei_entry_point,
+ event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+
+
+ err = sdei_do_cross_call(_local_event_register, event);
+ if (err) {
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ sdei_do_cross_call(_local_event_unregister, event);
+ }
+
+ return err;
+}
+
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ do {
+ if (sdei_event_find(event_num)) {
+ pr_warn("Event %u already registered\n", event_num);
+ err = -EBUSY;
+ break;
+ }
+
+ event = sdei_event_create(event_num, cb, arg);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ pr_warn("Failed to create event %u: %d\n", event_num,
+ err);
+ break;
+ }
+
+ err = _sdei_event_register(event);
+ if (err) {
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num,
+ err);
+ }
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_register);
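+/*
+ * Illustrative usage (not part of this file): a client binds a callback to
+ * an event number it discovered from firmware, then enables it. The event
+ * number and callback below are hypothetical; callbacks run with normal
+ * interrupts masked, so they must be minimal.
+ *
+ *	static int example_cb(u32 event_num, struct pt_regs *regs, void *arg)
+ *	{
+ *		pr_crit("SDEI event %u fired\n", event_num);
+ *		return 0;
+ *	}
+ *
+ *	err = sdei_event_register(event_num, example_cb, NULL);
+ *	if (!err)
+ *		err = sdei_event_enable(event_num);
+ */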
+
+static int sdei_reregister_event(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ err = _sdei_event_register(event);
+ if (err) {
+ pr_err("Failed to re-register event %u\n", event->event_num);
+ sdei_event_destroy(event);
+ return err;
+ }
+
+ if (event->reenable) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ }
+
+ if (err)
+ pr_err("Failed to re-enable event %u\n", event->event_num);
+
+ return err;
+}
+
+static int sdei_reregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+	mutex_lock(&sdei_events_lock);
+	/*
+	 * Walk the list under the mutex alone: sdei_reregister_event() ends
+	 * up in _sdei_event_register() (and, on failure, sdei_event_destroy()),
+	 * both of which take sdei_list_lock, so holding that spinlock across
+	 * this loop would self-deadlock.
+	 */
+	list_for_each_entry(event, &sdei_list, list) {
+		if (event->type != SDEI_EVENT_TYPE_SHARED)
+			continue;
+
+		if (event->reregister) {
+			err = sdei_reregister_event(event);
+			if (err)
+				break;
+		}
+	}
+	mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_cpuhp_down(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* un-register private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_unregister(&arg);
+ if (arg.first_error)
+ pr_err("Failed to unregister event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_mask_local_cpu();
+}
+
+static int sdei_cpuhp_up(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* re-register/enable private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_register(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-register event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+
+ if (event->reenable) {
+ CROSSCALL_INIT(arg, event);
+ _local_event_enable(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-enable event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_unmask_local_cpu();
+}
+
+/* When entering idle, mask/unmask events for this cpu */
+static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ int rv;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ rv = sdei_mask_local_cpu();
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ rv = sdei_unmask_local_cpu();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (rv)
+ return notifier_from_errno(rv);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_pm_nb = {
+ .notifier_call = sdei_pm_notifier,
+};
+
+static int sdei_device_suspend(struct device *dev)
+{
+ on_each_cpu(_ipi_mask_cpu, NULL, true);
+
+ return 0;
+}
+
+static int sdei_device_resume(struct device *dev)
+{
+ on_each_cpu(_ipi_unmask_cpu, NULL, true);
+
+ return 0;
+}
+
+/*
+ * We need all events to be reregistered when we resume from hibernate.
+ *
+ * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
+ * events during freeze, then re-register and re-enable them during thaw
+ * and restore.
+ */
+static int sdei_device_freeze(struct device *dev)
+{
+ int err;
+
+ /* unregister private events */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ err = sdei_unregister_shared();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sdei_device_thaw(struct device *dev)
+{
+ int err;
+
+ /* re-register shared events */
+ err = sdei_reregister_shared();
+ if (err) {
+ pr_warn("Failed to re-register shared events...\n");
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err)
+ pr_warn("Failed to re-register CPU hotplug notifier...\n");
+
+ return err;
+}
+
+static int sdei_device_restore(struct device *dev)
+{
+ int err;
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ return sdei_device_thaw(dev);
+}
+
+static const struct dev_pm_ops sdei_pm_ops = {
+ .suspend = sdei_device_suspend,
+ .resume = sdei_device_resume,
+ .freeze = sdei_device_freeze,
+ .thaw = sdei_device_thaw,
+ .restore = sdei_device_restore,
+};
+
+/*
+ * Mask all CPUs and unregister all events on panic, reboot or kexec.
+ */
+static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ /*
+ * We are going to reset the interface, after this there is no point
+ * doing work when we take CPUs offline.
+ */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ sdei_platform_reset();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_reboot_nb = {
+ .notifier_call = sdei_reboot_notifier,
+};
+
+static void sdei_smccc_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static void sdei_smccc_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static int sdei_get_conduit(struct platform_device *pdev)
+{
+ const char *method;
+ struct device_node *np = pdev->dev.of_node;
+
+ sdei_firmware_call = NULL;
+ if (np) {
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return CONDUIT_INVALID;
+ }
+
+ if (!strcmp("hvc", method)) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else if (!strcmp("smc", method)) {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ } else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
+ if (acpi_psci_use_hvc()) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+ }
+
+ return CONDUIT_INVALID;
+}
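+/*
+ * Illustrative DT fragment matched by sdei_present_dt() below and parsed by
+ * sdei_get_conduit() above; node and property names follow the code, the
+ * choice of "smc" is arbitrary:
+ *
+ *	firmware {
+ *		sdei {
+ *			compatible = "arm,sdei-1.0";
+ *			method = "smc";
+ *		};
+ *	};
+ */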
+
+static int sdei_probe(struct platform_device *pdev)
+{
+ int err;
+ u64 ver = 0;
+ int conduit;
+
+ conduit = sdei_get_conduit(pdev);
+ if (!sdei_firmware_call)
+ return 0;
+
+ err = sdei_api_get_version(&ver);
+ if (err == -EOPNOTSUPP)
+ pr_err("advertised but not implemented in platform firmware\n");
+ if (err) {
+ pr_err("Failed to get SDEI version: %d\n", err);
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
+ (int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
+ (int)SDEI_VERSION_VENDOR(ver));
+
+ if (SDEI_VERSION_MAJOR(ver) != 1) {
+		pr_warn("Unsupported SDEI version detected.\n");
+ sdei_mark_interface_broken();
+ return -EINVAL;
+ }
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ sdei_entry_point = sdei_arch_get_entry_point(conduit);
+ if (!sdei_entry_point) {
+ /* Not supported due to hardware or boot configuration */
+ sdei_mark_interface_broken();
+ return 0;
+ }
+
+ err = cpu_pm_register_notifier(&sdei_pm_nb);
+ if (err) {
+ pr_warn("Failed to register CPU PM notifier...\n");
+ goto error;
+ }
+
+ err = register_reboot_notifier(&sdei_reboot_nb);
+ if (err) {
+ pr_warn("Failed to register reboot notifier...\n");
+ goto remove_cpupm;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err) {
+ pr_warn("Failed to register CPU hotplug notifier...\n");
+ goto remove_reboot;
+ }
+
+ return 0;
+
+remove_reboot:
+ unregister_reboot_notifier(&sdei_reboot_nb);
+
+remove_cpupm:
+ cpu_pm_unregister_notifier(&sdei_pm_nb);
+
+error:
+ sdei_mark_interface_broken();
+ return err;
+}
+
+static const struct of_device_id sdei_of_match[] = {
+ { .compatible = "arm,sdei-1.0" },
+ {}
+};
+
+static struct platform_driver sdei_driver = {
+ .driver = {
+ .name = "sdei",
+ .pm = &sdei_pm_ops,
+ .of_match_table = sdei_of_match,
+ },
+ .probe = sdei_probe,
+};
+
+static bool __init sdei_present_dt(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np, *fw_np;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return false;
+
+ np = of_find_matching_node(fw_np, sdei_of_match);
+ of_node_put(fw_np);
+ if (!np)
+ return false;
+
+ pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL);
+ of_node_put(np);
+ if (!pdev)
+ return false;
+
+ return true;
+}
+
+static bool __init sdei_present_acpi(void)
+{
+ acpi_status status;
+ struct platform_device *pdev;
+ struct acpi_table_header *sdei_table_header;
+
+ if (acpi_disabled)
+ return false;
+
+ status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
+ }
+ if (ACPI_FAILURE(status))
+ return false;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
+ 0);
+ if (IS_ERR(pdev))
+ return false;
+
+ return true;
+}
+
+static int __init sdei_init(void)
+{
+ if (sdei_present_dt() || sdei_present_acpi())
+ platform_driver_register(&sdei_driver);
+
+ return 0;
+}
+
+/*
+ * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
+ * its events. ACPI is initialised from a subsys_initcall() and GHES from a
+ * device_initcall(); we want to be called between the two.
+ */
+subsys_initcall_sync(sdei_init);
+
+int sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ int err;
+ mm_segment_t orig_addr_limit;
+ u32 event_num = arg->event_num;
+
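+	/*
+	 * The event may have fired at any point, whatever address limit the
+	 * interrupted context was using. Run the callback with USER_DS, as
+	 * ordinary exception entry would, and restore the old value after.
+	 */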
+ orig_addr_limit = get_fs();
+ set_fs(USER_DS);
+
+ err = arg->callback(event_num, regs, arg->callback_arg);
+ if (err)
+ pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
+ event_num, smp_processor_id(), err);
+
+ set_fs(orig_addr_limit);
+
+ return err;
+}
+NOKPROBE_SYMBOL(sdei_event_handler);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2b4c39fdfa91..6047ed4e8a3d 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -159,13 +159,21 @@ config RESET_ATTACK_MITIGATION
using the TCG Platform Reset Attack Mitigation specification. This
protects against an attacker forcibly rebooting the system while it
still contains secrets in RAM, booting another OS and extracting the
- secrets.
+ secrets. This should only be enabled when userland is configured to
+ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+ have been evicted, since otherwise it will trigger even on clean
+ reboots.
endmenu
config UEFI_CPER
bool
+config UEFI_CPER_ARM
+ bool
+	depends on UEFI_CPER && (ARM || ARM64)
+ default y
+
config EFI_DEV_PATH_PARSER
bool
depends on ACPI
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 269501dfba53..cb805374f4bc 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -11,7 +11,7 @@
KASAN_SANITIZE_runtime-wrappers.o := n
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
obj-$(CONFIG_EFI) += capsule.o memmap.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
@@ -30,3 +30,4 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 055e2e8f985a..e456f4602df1 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -45,7 +45,7 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
if (pages_needed == 0) {
- pr_err("invalid capsule size");
+ pr_err("invalid capsule size\n");
return -EINVAL;
}
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 000000000000..698e5c8e0c8d
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,356 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+#define INDENT_SP " "
+
+static const char * const arm_reg_ctx_strs[] = {
+ "AArch32 general purpose registers",
+ "AArch32 EL1 context registers",
+ "AArch32 EL2 context registers",
+ "AArch32 secure context registers",
+ "AArch64 general purpose registers",
+ "AArch64 EL1 context registers",
+ "AArch64 EL2 context registers",
+ "AArch64 EL3 context registers",
+ "Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Eviction",
+ "Snooping (processor initiated a cache snoop that resulted in an error)",
+ "Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+ "Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+ "Generic write (type of instruction of data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Local management operation (processor initiated a TLB management operation that resulted in an error)",
+ "External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+ "Local processor originated request",
+ "Local processor responded to request",
+ "Local processor observed",
+ "Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+ "External Memory Access",
+ "Internal Memory Access",
+ "Unknown",
+ "Device Memory Access",
+};
+
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+ u64 error_info)
+{
+ u8 trans_type, op_type, level, participation_type, address_space;
+ u16 mem_attributes;
+ bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+ bool time_out, access_mode;
+
+ /* If the type is unknown, bail. */
+ if (type > CPER_ARM_MAX_TYPE)
+ return;
+
+ /*
+ * Vendor type errors have error information values that are vendor
+ * specific.
+ */
+ if (type == CPER_ARM_VENDOR_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+ trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+ & CPER_ARM_ERR_TRANSACTION_MASK);
+ if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+ printk("%stransaction type: %s\n", pfx,
+ arm_err_trans_type_strs[trans_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+ op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+ & CPER_ARM_ERR_OPERATION_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_cache_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_TLB_ERROR:
+ if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_tlb_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_BUS_ERROR:
+ if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_bus_err_op_strs[op_type]);
+ }
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+ level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+ & CPER_ARM_ERR_LEVEL_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ printk("%scache level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_TLB_ERROR:
+ printk("%sTLB level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_BUS_ERROR:
+ printk("%saffinity level at which the bus error occurred: %d\n",
+ pfx, level);
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+ proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+ & CPER_ARM_ERR_PC_CORRUPT_MASK);
+ if (proc_context_corrupt)
+ printk("%sprocessor context corrupted\n", pfx);
+ else
+ printk("%sprocessor context not corrupted\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+ corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+ & CPER_ARM_ERR_CORRECTED_MASK);
+ if (corrected)
+ printk("%sthe error has been corrected\n", pfx);
+ else
+ printk("%sthe error has not been corrected\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+ precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+ & CPER_ARM_ERR_PRECISE_PC_MASK);
+ if (precise_pc)
+ printk("%sPC is precise\n", pfx);
+ else
+ printk("%sPC is imprecise\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+ restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+ & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+ if (restartable_pc)
+ printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+ }
+
+ /* The rest of the fields are specific to bus errors */
+ if (type != CPER_ARM_BUS_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+ participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+ & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+ if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+ printk("%sparticipation type: %s\n", pfx,
+ arm_bus_err_part_type_strs[participation_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+ time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+ & CPER_ARM_ERR_TIME_OUT_MASK);
+ if (time_out)
+ printk("%srequest timed out\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+ address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+ & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+ if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+ printk("%saddress space: %s\n", pfx,
+ arm_bus_err_addr_space_strs[address_space]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+ mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+ & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+ printk("%smemory access attributes:0x%x\n", pfx, mem_attributes);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+ access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+ & CPER_ARM_ERR_ACCESS_MODE_MASK);
+ if (access_mode)
+ printk("%saccess mode: normal\n", pfx);
+ else
+ printk("%saccess mode: secure\n", pfx);
+ }
+}
+
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc)
+{
+ int i, len, max_ctx_type;
+ struct cper_arm_err_info *err_info;
+ struct cper_arm_ctx_info *ctx_info;
+ char newpfx[64], infopfx[64];
+
+ printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+ len = proc->section_length - (sizeof(*proc) +
+ proc->err_info_num * (sizeof(*err_info)));
+ if (len < 0) {
+ printk("%ssection length: %d\n", pfx, proc->section_length);
+ printk("%ssection length is too small\n", pfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+ return;
+ }
+
+ if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+ printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+ pfx, proc->mpidr);
+
+ if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+ printk("%serror affinity level: %d\n", pfx,
+ proc->affinity_level);
+
+ if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+ printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+ printk("%sPower State Coordination Interface state: %d\n",
+ pfx, proc->psci_state);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
+
+ err_info = (struct cper_arm_err_info *)(proc + 1);
+ for (i = 0; i < proc->err_info_num; i++) {
+ printk("%sError info structure %d:\n", pfx, i);
+
+ printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+ printk("%sfirst error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+ printk("%slast error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+ printk("%spropagated error captured\n",
+ newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+ printk("%soverflow occurred, error info is incomplete\n",
+ newpfx);
+ }
+
+ printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+ err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_info->type] : "unknown");
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+ printk("%serror_info: 0x%016llx\n", newpfx,
+ err_info->error_info);
+ snprintf(infopfx, sizeof(infopfx), "%s%s", newpfx, INDENT_SP);
+ cper_print_arm_err_info(infopfx, err_info->type,
+ err_info->error_info);
+ }
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+ printk("%svirtual fault address: 0x%016llx\n",
+ newpfx, err_info->virt_fault_addr);
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+ printk("%sphysical fault address: 0x%016llx\n",
+ newpfx, err_info->physical_fault_addr);
+ err_info += 1;
+ }
+
+ ctx_info = (struct cper_arm_ctx_info *)err_info;
+ max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+ for (i = 0; i < proc->context_info_num; i++) {
+ int size = sizeof(*ctx_info) + ctx_info->size;
+
+ printk("%sContext info structure %d:\n", pfx, i);
+ if (len < size) {
+ printk("%ssection length is too small\n", newpfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ return;
+ }
+ if (ctx_info->type > max_ctx_type) {
+ printk("%sInvalid context type: %d (max: %d)\n",
+ newpfx, ctx_info->type, max_ctx_type);
+ return;
+ }
+ printk("%sregister context type: %s\n", newpfx,
+ arm_reg_ctx_strs[ctx_info->type]);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (ctx_info + 1), ctx_info->size, 0);
+ len -= size;
+ ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+ }
+
+ if (len > 0) {
+ printk("%sVendor specific error info has %u bytes:\n", pfx,
+ len);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+ len, true);
+ }
+}
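Every field decoded above is a shift-and-mask off the 64-bit error_info word.
A self-contained sketch of one such decode; the shift and mask values stand in
for the CPER_ARM_ERR_* constants (defined in a header outside this diff), and
the sample value is invented:

	#include <stdio.h>
	#include <stdint.h>

	/* stand-ins for CPER_ARM_ERR_* constants; values assumed */
	#define ERR_VALID_TRANSACTION_TYPE	(1ULL << 0)
	#define ERR_TRANSACTION_SHIFT		16
	#define ERR_TRANSACTION_MASK		0x3

	static const char * const trans_strs[] = {
		"Instruction", "Data Access", "Generic",
	};

	int main(void)
	{
		uint64_t error_info = 0x0000000000010001ULL;	/* hypothetical */

		if (error_info & ERR_VALID_TRANSACTION_TYPE) {
			uint8_t t = (error_info >> ERR_TRANSACTION_SHIFT) &
				    ERR_TRANSACTION_MASK;

			if (t < 3)	/* prints "Data Access" for this value */
				printf("transaction type: %s\n", trans_strs[t]);
		}
		return 0;
	}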
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d2fcafcea07e..c165933ebf38 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -122,7 +122,7 @@ static const char * const proc_isa_strs[] = {
"ARM A64",
};
-static const char * const proc_error_type_strs[] = {
+const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
@@ -157,8 +157,8 @@ static void cper_print_proc_generic(const char *pfx,
if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
cper_print_bits(pfx, proc->proc_error_type,
- proc_error_type_strs,
- ARRAY_SIZE(proc_error_type_strs));
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
}
if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
printk("%s""operation: %d, %s\n", pfx, proc->operation,
@@ -188,122 +188,6 @@ static void cper_print_proc_generic(const char *pfx,
printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-static const char * const arm_reg_ctx_strs[] = {
- "AArch32 general purpose registers",
- "AArch32 EL1 context registers",
- "AArch32 EL2 context registers",
- "AArch32 secure context registers",
- "AArch64 general purpose registers",
- "AArch64 EL1 context registers",
- "AArch64 EL2 context registers",
- "AArch64 EL3 context registers",
- "Misc. system register structure",
-};
-
-static void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc)
-{
- int i, len, max_ctx_type;
- struct cper_arm_err_info *err_info;
- struct cper_arm_ctx_info *ctx_info;
- char newpfx[64];
-
- printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
-
- len = proc->section_length - (sizeof(*proc) +
- proc->err_info_num * (sizeof(*err_info)));
- if (len < 0) {
- printk("%ssection length: %d\n", pfx, proc->section_length);
- printk("%ssection length is too small\n", pfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
- return;
- }
-
- if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
- printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
- pfx, proc->mpidr);
-
- if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
- printk("%serror affinity level: %d\n", pfx,
- proc->affinity_level);
-
- if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
- printk("%srunning state: 0x%x\n", pfx, proc->running_state);
- printk("%sPower State Coordination Interface state: %d\n",
- pfx, proc->psci_state);
- }
-
- snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
-
- err_info = (struct cper_arm_err_info *)(proc + 1);
- for (i = 0; i < proc->err_info_num; i++) {
- printk("%sError info structure %d:\n", pfx, i);
-
- printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
-
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
- if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
- printk("%sfirst error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
- printk("%slast error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
- printk("%spropagated error captured\n",
- newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
- printk("%soverflow occurred, error info is incomplete\n",
- newpfx);
- }
-
- printk("%serror_type: %d, %s\n", newpfx, err_info->type,
- err_info->type < ARRAY_SIZE(proc_error_type_strs) ?
- proc_error_type_strs[err_info->type] : "unknown");
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO)
- printk("%serror_info: 0x%016llx\n", newpfx,
- err_info->error_info);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
- printk("%svirtual fault address: 0x%016llx\n",
- newpfx, err_info->virt_fault_addr);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
- printk("%sphysical fault address: 0x%016llx\n",
- newpfx, err_info->physical_fault_addr);
- err_info += 1;
- }
-
- ctx_info = (struct cper_arm_ctx_info *)err_info;
- max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
- for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
-
- printk("%sContext info structure %d:\n", pfx, i);
- if (len < size) {
- printk("%ssection length is too small\n", newpfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- return;
- }
- if (ctx_info->type > max_ctx_type) {
- printk("%sInvalid context type: %d (max: %d)\n",
- newpfx, ctx_info->type, max_ctx_type);
- return;
- }
- printk("%sregister context type: %s\n", newpfx,
- arm_reg_ctx_strs[ctx_info->type]);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
- (ctx_info + 1), ctx_info->size, 0);
- len -= size;
- ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
- }
-
- if (len > 0) {
- printk("%sVendor specific error info has %u bytes:\n", pfx,
- len);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
- len, true);
- }
-}
-#endif
-
static const char * const mem_err_type_strs[] = {
"unknown",
"no error",
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 557a47829d03..cd42f66a7c85 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,6 +52,7 @@ struct efi __read_mostly efi = {
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
+ .tpm_log = EFI_INVALID_TABLE_ADDR
};
EXPORT_SYMBOL(efi);
@@ -464,6 +465,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
{NULL_GUID, NULL, NULL},
};
@@ -552,6 +554,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
if (efi_enabled(EFI_MEMMAP))
efi_memattr_init();
+ efi_tpm_eventlog_init();
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
@@ -608,7 +612,7 @@ static int __init efi_load_efivars(void)
return 0;
pdev = platform_device_register_simple("efivars", 0, NULL, 0);
- return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(efi_load_efivars);
#endif
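
The tpm_log additions above follow the existing config-table pattern: efi_config_parse_tables() walks the firmware-provided (GUID, address) pairs and, on a match against common_tables[], stores the address in the struct efi field the entry points to. A simplified, self-contained sketch of that matching; names and types here are illustrative, not the kernel's exact ones:

	#include <stdio.h>
	#include <string.h>

	typedef struct { unsigned char b[16]; } guid_t;

	struct table_match { guid_t guid; const char *name; unsigned long *ptr; };
	struct fw_table { guid_t guid; unsigned long addr; };

	static void parse_tables(const struct fw_table *fw, int count,
				 const struct table_match *match, int nmatch)
	{
		int i, m;

		for (i = 0; i < count; i++)
			for (m = 0; m < nmatch; m++)
				if (!memcmp(&fw[i].guid, &match[m].guid, sizeof(guid_t))) {
					/* e.g. fills in efi.tpm_log */
					*match[m].ptr = fw[i].addr;
					printf("found %s at 0x%lx\n",
					       match[m].name, fw[i].addr);
				}
	}

	int main(void)
	{
		unsigned long tpm_log = ~0UL;	/* EFI_INVALID_TABLE_ADDR analogue */
		guid_t g = { { 0xb7, 0xab } };	/* made-up GUID bytes */
		struct table_match match[] = { { g, "TPMEventLog", &tpm_log } };
		struct fw_table fw[] = { { g, 0x7f000000UL } };

		parse_tables(fw, 1, match, 1);
		return 0;
	}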
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index adaa4a964f0c..7b3ba40f0745 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -30,8 +30,7 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o gop.o secureboot.o
-lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 6224cdbc9669..da661bf8cb96 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -4,15 +4,18 @@
* Copyright (C) 2016 CoreOS, Inc
* Copyright (C) 2017 Google, Inc.
* Matthew Garrett <mjg59@google.com>
+ * Thiebaud Weksteen <tweek@google.com>
*
* This file is part of the Linux kernel, and is made available under the
* terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
#include <asm/efi.h>
#include "efistub.h"
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
static const efi_char16_t efi_MemoryOverWriteRequest_name[] = {
'M', 'e', 'm', 'o', 'r', 'y', 'O', 'v', 'e', 'r', 'w', 'r', 'i', 't',
'e', 'R', 'e', 'q', 'u', 'e', 's', 't', 'C', 'o', 'n', 't', 'r', 'o',
@@ -56,3 +59,81 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
}
+
+#endif
+
+void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+ efi_status_t status;
+ efi_physical_addr_t log_location, log_last_entry;
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+ efi_bool_t truncated;
+ void *tcg2_protocol;
+
+ status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
+ &tcg2_protocol);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
+ &log_location, &log_last_entry, &truncated);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (!log_location)
+ return;
+ first_entry_addr = (unsigned long) log_location;
+
+ /*
+ * We populate the EFI table even if the logs are empty.
+ */
+ if (!log_last_entry) {
+ log_size = 0;
+ } else {
+ last_entry_addr = (unsigned long) log_last_entry;
+ /*
+ * get_event_log only returns the address of the last entry.
+ * We need to calculate its size to deduce the full size of
+ * the logs.
+ */
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ log_size = log_last_entry - log_location + last_entry_size;
+ }
+
+ /* Allocate space for the logs and copy them. */
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size,
+ (void **) &log_tbl);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg,
+ "Unable to allocate memory for event log\n");
+ return;
+ }
+
+ memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
+ log_tbl->size = log_size;
+ log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
+
+ status = efi_call_early(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
+ if (status != EFI_SUCCESS)
+ goto err_free;
+ return;
+
+err_free:
+ efi_call_early(free_pool, log_tbl);
+}
+
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+{
+ /* Only try to retrieve the logs in 1.2 format. */
+ efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
+}
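
The only subtle step above is sizing the log: get_event_log() reports the start of the log and the start of its last entry, so the stub parses the last TCG 1.2 entry header to find where the log ends. A sketch of that computation, with a simplified stand-in for struct tcpa_event (the real layout lives in linux/tpm_eventlog.h):

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified TCG 1.2 event header, assumed layout for illustration. */
	struct tcpa_event_hdr {
		unsigned int pcr_index;
		unsigned int event_type;
		unsigned char pcr_value[20];
		unsigned int event_size;	/* bytes of event data that follow */
	};

	static size_t log_size(unsigned long first, unsigned long last,
			       const struct tcpa_event_hdr *last_hdr)
	{
		size_t last_entry_size = sizeof(*last_hdr) + last_hdr->event_size;

		return last - first + last_entry_size;
	}

	int main(void)
	{
		struct tcpa_event_hdr hdr = { .event_size = 12 };

		/* Log starts at 0x1000, its last entry starts at 0x1400. */
		printf("%zu bytes\n", log_size(0x1000, 0x1400, &hdr));
		return 0;
	}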
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
new file mode 100644
index 000000000000..0cbeb3d46b18
--- /dev/null
+++ b/drivers/firmware/efi/tpm.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * Reserve the memory associated with the TPM Event Log configuration table.
+ */
+int __init efi_tpm_eventlog_init(void)
+{
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned int tbl_size;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
+ if (!log_tbl) {
+ pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
+ efi.tpm_log);
+ efi.tpm_log = EFI_INVALID_TABLE_ADDR;
+ return -ENOMEM;
+ }
+
+ tbl_size = sizeof(*log_tbl) + log_tbl->size;
+ memblock_reserve(efi.tpm_log, tbl_size);
+ early_memunmap(log_tbl, sizeof(*log_tbl));
+ return 0;
+}
+
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index d687ca3d5049..8b25d31e8401 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -496,6 +496,8 @@ static void __init psci_init_migrate(void)
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_ops.get_version = psci_get_version;
+
psci_function_id[PSCI_FN_CPU_SUSPEND] =
PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
psci_ops.cpu_suspend = psci_cpu_suspend;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index f3f4f810e5df..bb1c068bff19 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,8 +77,8 @@ static int psci_ops_check(void)
return 0;
}
-static int find_clusters(const struct cpumask *cpus,
- const struct cpumask **clusters)
+static int find_cpu_groups(const struct cpumask *cpus,
+ const struct cpumask **cpu_groups)
{
unsigned int nb = 0;
cpumask_var_t tmp;
@@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus,
cpumask_copy(tmp, cpus);
while (!cpumask_empty(tmp)) {
- const struct cpumask *cluster =
+ const struct cpumask *cpu_group =
topology_core_cpumask(cpumask_any(tmp));
- clusters[nb++] = cluster;
- cpumask_andnot(tmp, tmp, cluster);
+ cpu_groups[nb++] = cpu_group;
+ cpumask_andnot(tmp, tmp, cpu_group);
}
free_cpumask_var(tmp);
@@ -170,24 +170,24 @@ static int hotplug_tests(void)
{
int err;
cpumask_var_t offlined_cpus;
- int i, nb_cluster;
- const struct cpumask **clusters;
+ int i, nb_cpu_group;
+ const struct cpumask **cpu_groups;
char *page_buf;
err = -ENOMEM;
if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
return err;
- /* We may have up to nb_available_cpus clusters. */
- clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
- GFP_KERNEL);
- if (!clusters)
+ /* We may have up to nb_available_cpus cpu_groups. */
+ cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups)
goto out_free_cpus;
page_buf = (char *)__get_free_page(GFP_KERNEL);
if (!page_buf)
- goto out_free_clusters;
+ goto out_free_cpu_groups;
err = 0;
- nb_cluster = find_clusters(cpu_online_mask, clusters);
+ nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
/*
* Of course the last CPU cannot be powered down and cpu_down() should
@@ -197,24 +197,22 @@ static int hotplug_tests(void)
err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
- * Take down CPUs by cluster this time. When the last CPU is turned
- * off, the cluster itself should shut down.
+ * Take down CPUs by cpu group this time. When the last CPU is turned
+ * off, the cpu group itself should shut down.
*/
- for (i = 0; i < nb_cluster; ++i) {
- int cluster_id =
- topology_physical_package_id(cpumask_any(clusters[i]));
+ for (i = 0; i < nb_cpu_group; ++i) {
ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
- clusters[i]);
+ cpu_groups[i]);
/* Remove trailing newline. */
page_buf[len - 1] = '\0';
- pr_info("Trying to turn off and on again cluster %d "
- "(CPUs %s)\n", cluster_id, page_buf);
- err += down_and_up_cpus(clusters[i], offlined_cpus);
+ pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+ i, page_buf);
+ err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
}
free_page((unsigned long)page_buf);
-out_free_clusters:
- kfree(clusters);
+out_free_cpu_groups:
+ kfree(cpu_groups);
out_free_cpus:
free_cpumask_var(offlined_cpus);
return err;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d6a8e851ad13..8dbb2280538d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -122,12 +122,6 @@ config GPIO_ATH79
Select this option to enable GPIO driver for
Atheros AR71XX/AR724X/AR913X SoC devices.
-config GPIO_AXP209
- tristate "X-Powers AXP209 PMIC GPIO Support"
- depends on MFD_AXP20X
- help
- Say yes to enable GPIO support for the AXP209 PMIC
-
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -704,6 +698,22 @@ config GPIO_TS5500
blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
LCD port.
+config GPIO_WINBOND
+ tristate "Winbond Super I/O GPIO support"
+ depends on ISA_BUS_API
+ help
+ This option enables support for GPIOs found on Winbond Super I/O
+ chips.
+ Currently, only W83627UHG (also known as Nuvoton NCT6627UD) is
+ supported.
+
+ You will need to provide a module parameter "gpios", or a
+ boot-time parameter "gpio_winbond.gpios" with a bitmask of GPIO
+ ports to enable (bit 0 is GPIO1, bit 1 is GPIO2, etc.).
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-winbond.
+
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
depends on ISA_BUS_API
@@ -1234,6 +1244,16 @@ config GPIO_PCI_IDIO_16
low). Input filter control is not supported by this driver, and the
input filters are deactivated by this driver.
+config GPIO_PCIE_IDIO_24
+ tristate "ACCES PCIe-IDIO-24 GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the ACCES PCIe-IDIO-24 family (PCIe-IDIO-24,
+ PCIe-IDI-24, PCIe-IDO-24, PCIe-IDIO-12). An interrupt is generated
+ when any of the inputs change state (low to high or high to low).
+ Input filter control is not supported by this driver, and the input
+ filters are deactivated by this driver.
+
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
select MFD_CORE
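
For the new Winbond entry above, enabling ports GPIO1 and GPIO2 would look something like this (the bitmask value is illustrative):

	modprobe gpio-winbond gpios=0x03

or, with the driver built in, gpio_winbond.gpios=0x03 on the kernel command line.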
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4bc24febb889..cccb0d40846c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
-obj-$(CONFIG_GPIO_AXP209) += gpio-axp209.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
@@ -96,6 +95,7 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
+obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
@@ -140,6 +140,7 @@ obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
+obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index afbff155a0ba..e82cc763633c 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -125,6 +125,48 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
EXPORT_SYMBOL(devm_gpiod_get_index);
/**
+ * devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @dev: device for lifecycle management
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc **dr;
+ struct gpio_desc *desc;
+
+ dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
+ if (IS_ERR(desc)) {
+ devres_free(dr);
+ return desc;
+ }
+
+ *dr = desc;
+ devres_add(dev, dr);
+
+ return desc;
+}
+EXPORT_SYMBOL(devm_gpiod_get_from_of_node);
+
+/**
* devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
* device's child node
* @dev: GPIO consumer
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 15a1f4b348c4..fb7b620763a2 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -9,12 +9,11 @@
* published by the Free Software Foundation.
*/
-#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index abf199609546..21452622d954 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -12,8 +12,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mfd/adp5520.h>
-
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
struct adp5520_gpio {
struct device *master;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index e717f8dc3966..3530ccd17e04 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 8e76d390e653..8c3ff6e2366f 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -18,7 +18,8 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/of_gpio.h> /* For of_mm_gpio_chip */
#include <linux/platform_device.h>
#define ALTERA_GPIO_MAX_NGPIO 32
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 30ad7d7c1678..fdcebe59510d 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -28,7 +28,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index d4e6ba0301bc..ba51ea15f379 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 6b3ca6601af2..77e485557498 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -60,6 +60,7 @@ struct aspeed_gpio_bank {
uint16_t val_regs;
uint16_t irq_regs;
uint16_t debounce_regs;
+ uint16_t tolerance_regs;
const char names[4][3];
};
@@ -70,48 +71,56 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.val_regs = 0x0000,
.irq_regs = 0x0008,
.debounce_regs = 0x0040,
+ .tolerance_regs = 0x001c,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
.irq_regs = 0x0028,
.debounce_regs = 0x0048,
+ .tolerance_regs = 0x003c,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
.irq_regs = 0x0098,
.debounce_regs = 0x00b0,
+ .tolerance_regs = 0x00ac,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
.irq_regs = 0x00e8,
.debounce_regs = 0x0100,
+ .tolerance_regs = 0x00fc,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
.irq_regs = 0x0118,
.debounce_regs = 0x0130,
+ .tolerance_regs = 0x012c,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
.irq_regs = 0x0148,
.debounce_regs = 0x0160,
+ .tolerance_regs = 0x015c,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
.irq_regs = 0x0178,
.debounce_regs = 0x0190,
+ .tolerance_regs = 0x018c,
.names = { "Y", "Z", "AA", "AB" },
},
{
- .val_regs = 0x01E8,
- .irq_regs = 0x01A8,
+ .val_regs = 0x01e8,
+ .irq_regs = 0x01a8,
.debounce_regs = 0x01c0,
+ .tolerance_regs = 0x01bc,
.names = { "AC", "", "", "" },
},
};
@@ -140,7 +149,7 @@ static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
{
unsigned int bank = GPIO_BANK(offset);
- WARN_ON(bank > ARRAY_SIZE(aspeed_gpio_banks));
+ WARN_ON(bank >= ARRAY_SIZE(aspeed_gpio_banks));
return &aspeed_gpio_banks[bank];
}
@@ -534,6 +543,30 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
return 0;
}
+static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
+ unsigned int offset, bool enable)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ const struct aspeed_gpio_bank *bank;
+ unsigned long flags;
+ u32 val;
+
+ bank = to_bank(offset);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ val = readl(gpio->base + bank->tolerance_regs);
+
+ if (enable)
+ val |= GPIO_BIT(offset);
+ else
+ val &= ~GPIO_BIT(offset);
+
+ writel(val, gpio->base + bank->tolerance_regs);
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
if (!have_gpio(gpiochip_get_data(chip), offset))
@@ -771,6 +804,8 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */
return -ENOTSUPP;
+ else if (param == PIN_CONFIG_PERSIST_STATE)
+ return aspeed_gpio_reset_tolerance(chip, offset, arg);
return -ENOTSUPP;
}
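
The PIN_CONFIG_PERSIST_STATE case added above is what gpiolib's consumer-side state-persistence API reaches. A minimal sketch of a consumer marking a line as allowed to lose state across a controller reset, assuming the descriptor was already requested and that gpiod_set_transitory() maps to this pinconf parameter as in this series:

	#include <linux/gpio/consumer.h>

	/* Sketch: tell the controller this line need not survive a reset. */
	static int allow_reset_loss(struct gpio_desc *desc)
	{
		return gpiod_set_transitory(desc, true);
	}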
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 5fad89dfab7e..3ae7c1876bf4 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -324,3 +324,6 @@ static struct platform_driver ath79_gpio_driver = {
};
module_platform_driver(ath79_gpio_driver);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
deleted file mode 100644
index 4a346b7b4172..000000000000
--- a/drivers/gpio/gpio-axp209.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * AXP20x GPIO driver
- *
- * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/axp20x.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-
-#define AXP20X_GPIO_FUNCTIONS 0x7
-#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
-#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
-#define AXP20X_GPIO_FUNCTION_INPUT 2
-
-struct axp20x_gpio {
- struct gpio_chip chip;
- struct regmap *regmap;
-};
-
-static int axp20x_gpio_get_reg(unsigned offset)
-{
- switch (offset) {
- case 0:
- return AXP20X_GPIO0_CTRL;
- case 1:
- return AXP20X_GPIO1_CTRL;
- case 2:
- return AXP20X_GPIO2_CTRL;
- }
-
- return -EINVAL;
-}
-
-static int axp20x_gpio_input(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- AXP20X_GPIO_FUNCTION_INPUT);
-}
-
-static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int ret;
-
- ret = regmap_read(gpio->regmap, AXP20X_GPIO20_SS, &val);
- if (ret)
- return ret;
-
- return !!(val & BIT(offset + 4));
-}
-
-static int axp20x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int reg, ret;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- ret = regmap_read(gpio->regmap, reg, &val);
- if (ret)
- return ret;
-
- /*
- * This shouldn't really happen if the pin is in use already,
- * or if it's not in use yet, it doesn't matter since we're
- * going to change the value soon anyway. Default to output.
- */
- if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
- return 0;
-
- /*
- * The GPIO directions are the three lowest values.
- * 2 is input, 0 and 1 are output
- */
- return val & 2;
-}
-
-static int axp20x_gpio_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- value ? AXP20X_GPIO_FUNCTION_OUT_HIGH
- : AXP20X_GPIO_FUNCTION_OUT_LOW);
-}
-
-static void axp20x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- axp20x_gpio_output(chip, offset, value);
-}
-
-static int axp20x_gpio_probe(struct platform_device *pdev)
-{
- struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp20x_gpio *gpio;
- int ret;
-
- if (!of_device_is_available(pdev->dev.of_node))
- return -ENODEV;
-
- if (!axp20x) {
- dev_err(&pdev->dev, "Parent drvdata not set\n");
- return -EINVAL;
- }
-
- gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio)
- return -ENOMEM;
-
- gpio->chip.base = -1;
- gpio->chip.can_sleep = true;
- gpio->chip.parent = &pdev->dev;
- gpio->chip.label = dev_name(&pdev->dev);
- gpio->chip.owner = THIS_MODULE;
- gpio->chip.get = axp20x_gpio_get;
- gpio->chip.get_direction = axp20x_gpio_get_direction;
- gpio->chip.set = axp20x_gpio_set;
- gpio->chip.direction_input = axp20x_gpio_input;
- gpio->chip.direction_output = axp20x_gpio_output;
- gpio->chip.ngpio = 3;
-
- gpio->regmap = axp20x->regmap;
-
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register GPIO chip\n");
- return ret;
- }
-
- dev_info(&pdev->dev, "AXP209 GPIO driver loaded\n");
-
- return 0;
-}
-
-static const struct of_device_id axp20x_gpio_match[] = {
- { .compatible = "x-powers,axp209-gpio" },
- { }
-};
-MODULE_DEVICE_TABLE(of, axp20x_gpio_match);
-
-static struct platform_driver axp20x_gpio_driver = {
- .probe = axp20x_gpio_probe,
- .driver = {
- .name = "axp20x-gpio",
- .of_match_table = axp20x_gpio_match,
- },
-};
-
-module_platform_driver(axp20x_gpio_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("AXP20x PMIC GPIO driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 76861a00bb92..eb8369b21e90 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -17,7 +17,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/init.h>
@@ -127,7 +127,7 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
u32 val;
val = readl(reg_base + GPIO_CONTROL(gpio)) & GPIO_GPCTR0_IOTR_MASK;
- return val ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+ return !!val;
}
static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
@@ -144,7 +144,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
/* this function only applies to output pin */
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
goto out;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -170,7 +170,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
reg_offset = GPIO_IN_STATUS(bank_id);
else
reg_offset = GPIO_OUT_STATUS(bank_id);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index bb4f8cf18bd9..16c7f9f49416 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -19,7 +19,6 @@
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
-#include <linux/bitops.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index acefb25e8eca..b8ec75cbd4b5 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -46,7 +46,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Steal the hardware definitions from the bttv driver. */
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index b6f0f729656c..58531d8b8c6e 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -18,7 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/regmap.h>
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 90278b19aa0e..8814c8f47e57 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -12,7 +12,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/cs5535.h>
#include <asm/msr.h>
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index dd8977cf3e85..b6d3e997eb26 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -15,7 +15,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/syscalls.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 82053b52cba0..2f1b5d23b10c 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -13,7 +13,7 @@
*/
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index e4b3d7db68c9..0b951ca78ec4 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -9,7 +9,7 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 7b3394fdc624..b7a3a2db699b 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -176,8 +176,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
return PTR_ERR(g->base);
irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -EINVAL;
+ if (irq <= 0)
+ return irq ? irq : -EINVAL;
ret = bgpio_init(&g->gc, dev, 4,
g->base + GPIO_DATA_IN,
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 98c7ff2a76e7..8d62db447ec1 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
return platform_driver_register(&iop3xx_gpio_driver);
}
arch_initcall(iop3xx_gpio_init);
+
+MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d43d0a2cc4c5..efb46edff81f 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -414,6 +414,6 @@ static void __exit it87_gpio_exit(void)
module_init(it87_gpio_init);
module_exit(it87_gpio_exit);
-MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
+MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
MODULE_DESCRIPTION("GPIO interface for IT87xx Super I/O chips");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index c04fae1ba32a..9d8bcc69f245 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -709,8 +709,7 @@ static int max732x_probe(struct i2c_client *client,
return 0;
out_failed:
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return ret;
}
@@ -734,8 +733,7 @@ static int max732x_remove(struct i2c_client *client)
gpiochip_remove(&chip->gpio_chip);
/* unregister any dummy i2c_client */
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return 0;
}
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index dd67a31ac337..c38624ea0251 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -380,9 +381,16 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
+static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+{
+ const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
+ return dev_name ? dev_name : "pinctrl-merrifield";
+}
+
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mrfld_gpio_pinrange *range;
+ const char *pinctrl_dev_name;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
@@ -439,10 +447,11 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
- "pinctrl-merrifield",
+ pinctrl_dev_name,
range->gpio_base,
range->pin_base,
range->npins);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 9532d86a82f7..3a545ad17817 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -27,13 +27,15 @@
#include "gpiolib.h"
#define GPIO_MOCKUP_NAME "gpio-mockup"
-#define GPIO_MOCKUP_MAX_GC 10
+#define GPIO_MOCKUP_MAX_GC 10
/*
* We're storing two values per chip: the GPIO base and the number
* of GPIO lines.
*/
#define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2)
+#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
+
enum {
GPIO_MOCKUP_DIR_OUT = 0,
GPIO_MOCKUP_DIR_IN = 1,
@@ -46,7 +48,7 @@ enum {
*/
struct gpio_mockup_line_status {
int dir;
- bool value;
+ int value;
};
struct gpio_mockup_chip {
@@ -62,17 +64,33 @@ struct gpio_mockup_dbgfs_private {
int offset;
};
+struct gpio_mockup_platform_data {
+ int base;
+ int ngpio;
+ int index;
+ bool named_lines;
+};
+
static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES];
-static int gpio_mockup_params_nr;
-module_param_array(gpio_mockup_ranges, int, &gpio_mockup_params_nr, 0400);
+static int gpio_mockup_num_ranges;
+module_param_array(gpio_mockup_ranges, int, &gpio_mockup_num_ranges, 0400);
static bool gpio_mockup_named_lines;
module_param_named(gpio_mockup_named_lines,
gpio_mockup_named_lines, bool, 0400);
-static const char gpio_mockup_name_start = 'A';
static struct dentry *gpio_mockup_dbg_dir;
+static int gpio_mockup_range_base(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2];
+}
+
+static int gpio_mockup_range_ngpio(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2 + 1];
+}
+
static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -80,16 +98,26 @@ static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].value;
}
-static void gpio_mockup_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
chip->lines[offset].value = !!value;
}
-static int gpio_mockup_dirout(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, mask, gc->ngpio)
+ gpio_mockup_set(gc, bit, test_bit(bit, bits));
+}
+
+static int gpio_mockup_dirout(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -115,29 +143,6 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].dir;
}
-static int gpio_mockup_name_lines(struct device *dev,
- struct gpio_mockup_chip *chip)
-{
- struct gpio_chip *gc = &chip->gc;
- char **names;
- int i;
-
- names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
- if (!names)
- return -ENOMEM;
-
- for (i = 0; i < gc->ngpio; i++) {
- names[i] = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%d", gc->label, i);
- if (!names[i])
- return -ENOMEM;
- }
-
- gc->names = (const char *const *)names;
-
- return 0;
-}
-
static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -188,15 +193,21 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
struct gpio_mockup_chip *chip)
{
struct gpio_mockup_dbgfs_private *priv;
- struct dentry *evfile;
+ struct dentry *evfile, *link;
struct gpio_chip *gc;
+ const char *devname;
char *name;
int i;
gc = &chip->gc;
+ devname = dev_name(&gc->gpiodev->dev);
- chip->dbg_dir = debugfs_create_dir(gc->label, gpio_mockup_dbg_dir);
- if (!chip->dbg_dir)
+ chip->dbg_dir = debugfs_create_dir(devname, gpio_mockup_dbg_dir);
+ if (IS_ERR_OR_NULL(chip->dbg_dir))
+ goto err;
+
+ link = debugfs_create_symlink(gc->label, gpio_mockup_dbg_dir, devname);
+ if (IS_ERR_OR_NULL(link))
goto err;
for (i = 0; i < gc->ngpio; i++) {
@@ -214,23 +225,63 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
evfile = debugfs_create_file(name, 0200, chip->dbg_dir, priv,
&gpio_mockup_event_ops);
- if (!evfile)
+ if (IS_ERR_OR_NULL(evfile))
goto err;
}
return;
err:
- dev_err(dev, "error creating debugfs directory\n");
+ dev_err(dev, "error creating debugfs event files\n");
}
-static int gpio_mockup_add(struct device *dev,
- struct gpio_mockup_chip *chip,
- const char *name, int base, int ngpio)
+static int gpio_mockup_name_lines(struct device *dev,
+ struct gpio_mockup_chip *chip)
{
struct gpio_chip *gc = &chip->gc;
- int ret;
+ char **names;
+ int i;
+
+ names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", gc->label, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ gc->names = (const char *const *)names;
+
+ return 0;
+}
+
+static int gpio_mockup_probe(struct platform_device *pdev)
+{
+ struct gpio_mockup_platform_data *pdata;
+ struct gpio_mockup_chip *chip;
+ struct gpio_chip *gc;
+ int rv, base, ngpio;
+ struct device *dev;
+ char *name;
+
+ dev = &pdev->dev;
+ pdata = dev_get_platdata(dev);
+ base = pdata->base;
+ ngpio = pdata->ngpio;
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s-%c",
+ pdev->name, pdata->index);
+ if (!name)
+ return -ENOMEM;
+
+ gc = &chip->gc;
gc->base = base;
gc->ngpio = ngpio;
gc->label = name;
@@ -238,6 +289,7 @@ static int gpio_mockup_add(struct device *dev,
gc->parent = dev;
gc->get = gpio_mockup_get;
gc->set = gpio_mockup_set;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
@@ -248,19 +300,19 @@ static int gpio_mockup_add(struct device *dev,
if (!chip->lines)
return -ENOMEM;
- if (gpio_mockup_named_lines) {
- ret = gpio_mockup_name_lines(dev, chip);
- if (ret)
- return ret;
+ if (pdata->named_lines) {
+ rv = gpio_mockup_name_lines(dev, chip);
+ if (rv)
+ return rv;
}
- ret = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
- if (ret)
- return ret;
+ rv = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
+ if (rv < 0)
+ return rv;
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
- if (ret)
- return ret;
+ rv = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ if (rv)
+ return rv;
if (gpio_mockup_dbg_dir)
gpio_mockup_debugfs_setup(dev, chip);
@@ -268,58 +320,6 @@ static int gpio_mockup_add(struct device *dev,
return 0;
}
-static int gpio_mockup_probe(struct platform_device *pdev)
-{
- int ret, i, base, ngpio, num_chips;
- struct device *dev = &pdev->dev;
- struct gpio_mockup_chip *chips;
- char *chip_name;
-
- if (gpio_mockup_params_nr < 2 || (gpio_mockup_params_nr % 2))
- return -EINVAL;
-
- /* Each chip is described by two values. */
- num_chips = gpio_mockup_params_nr / 2;
-
- chips = devm_kcalloc(dev, num_chips, sizeof(*chips), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, chips);
-
- for (i = 0; i < num_chips; i++) {
- base = gpio_mockup_ranges[i * 2];
-
- if (base == -1)
- ngpio = gpio_mockup_ranges[i * 2 + 1];
- else
- ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
-
- if (ngpio >= 0) {
- chip_name = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%c", GPIO_MOCKUP_NAME,
- gpio_mockup_name_start + i);
- if (!chip_name)
- return -ENOMEM;
-
- ret = gpio_mockup_add(dev, &chips[i],
- chip_name, base, ngpio);
- } else {
- ret = -EINVAL;
- }
-
- if (ret) {
- dev_err(dev,
- "adding gpiochip failed: %d (base: %d, ngpio: %d)\n",
- ret, base, base < 0 ? ngpio : base + ngpio);
-
- return ret;
- }
- }
-
- return 0;
-}
-
static struct platform_driver gpio_mockup_driver = {
.driver = {
.name = GPIO_MOCKUP_NAME,
@@ -327,44 +327,88 @@ static struct platform_driver gpio_mockup_driver = {
.probe = gpio_mockup_probe,
};
-static struct platform_device *pdev;
-static int __init mock_device_init(void)
+static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
+
+static void gpio_mockup_unregister_pdevs(void)
{
- int err;
+ struct platform_device *pdev;
+ int i;
- gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
- if (!gpio_mockup_dbg_dir)
- pr_err("%s: error creating debugfs directory\n",
- GPIO_MOCKUP_NAME);
+ for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
+ pdev = gpio_mockup_pdevs[i];
- pdev = platform_device_alloc(GPIO_MOCKUP_NAME, -1);
- if (!pdev)
- return -ENOMEM;
+ if (pdev)
+ platform_device_unregister(pdev);
+ }
+}
- err = platform_device_add(pdev);
- if (err) {
- platform_device_put(pdev);
- return err;
+static int __init gpio_mockup_init(void)
+{
+ int i, num_chips, err = 0, index = 'A';
+ struct gpio_mockup_platform_data pdata;
+ struct platform_device *pdev;
+
+ if ((gpio_mockup_num_ranges < 2) ||
+ (gpio_mockup_num_ranges % 2) ||
+ (gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES))
+ return -EINVAL;
+
+ /* Each chip is described by two values. */
+ num_chips = gpio_mockup_num_ranges / 2;
+
+ /*
+ * The second value in the <base GPIO - number of GPIOs> pair must
+ * always be greater than 0.
+ */
+ for (i = 0; i < num_chips; i++) {
+ if (gpio_mockup_range_ngpio(i) < 0)
+ return -EINVAL;
}
+ gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
+ if (IS_ERR_OR_NULL(gpio_mockup_dbg_dir))
+ gpio_mockup_err("error creating debugfs directory\n");
+
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
- platform_device_unregister(pdev);
+ gpio_mockup_err("error registering platform driver\n");
return err;
}
+ for (i = 0; i < num_chips; i++) {
+ pdata.index = index++;
+ pdata.base = gpio_mockup_range_base(i);
+ pdata.ngpio = pdata.base < 0
+ ? gpio_mockup_range_ngpio(i)
+ : gpio_mockup_range_ngpio(i) - pdata.base;
+ pdata.named_lines = gpio_mockup_named_lines;
+
+ pdev = platform_device_register_resndata(NULL,
+ GPIO_MOCKUP_NAME,
+ i, NULL, 0, &pdata,
+ sizeof(pdata));
+ if (IS_ERR(pdev)) {
+ gpio_mockup_err("error registering device");
+ platform_driver_unregister(&gpio_mockup_driver);
+ gpio_mockup_unregister_pdevs();
+ return PTR_ERR(pdev);
+ }
+
+ gpio_mockup_pdevs[i] = pdev;
+ }
+
return 0;
}
-static void __exit mock_device_exit(void)
+static void __exit gpio_mockup_exit(void)
{
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- platform_device_unregister(pdev);
+ gpio_mockup_unregister_pdevs();
}
-module_init(mock_device_init);
-module_exit(mock_device_exit);
+module_init(gpio_mockup_init);
+module_exit(gpio_mockup_exit);
MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
MODULE_AUTHOR("Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e136d666f1e5..ab5035b96886 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1058,7 +1058,9 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
+ struct gpio_irq_chip *irq;
static int gpio;
+ const char *label;
int irq_base = 0;
int ret;
@@ -1080,21 +1082,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.parent = &omap_mpuio_device.dev;
bank->chip.base = OMAP_MPUIO(0);
} else {
- bank->chip.label = "gpio";
+ label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
+ gpio, gpio + bank->width - 1);
+ if (!label)
+ return -ENOMEM;
+ bank->chip.label = label;
bank->chip.base = gpio;
}
bank->chip.ngpio = bank->width;
- ret = gpiochip_add_data(&bank->chip, bank);
- if (ret) {
- dev_err(bank->chip.parent,
- "Could not register gpio chip %d\n", ret);
- return ret;
- }
-
- if (!bank->is_mpuio)
- gpio += bank->width;
-
#ifdef CONFIG_ARCH_OMAP1
/*
* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
@@ -1115,25 +1111,30 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
irqc->irq_set_wake = NULL;
}
- ret = gpiochip_irqchip_add(&bank->chip, irqc,
- irq_base, handle_bad_irq,
- IRQ_TYPE_NONE);
+ irq = &bank->chip.irq;
+ irq->chip = irqc;
+ irq->handler = handle_bad_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->num_parents = 1;
+ irq->parents = &bank->irq;
+ irq->first = irq_base;
+ ret = gpiochip_add_data(&bank->chip, bank);
if (ret) {
dev_err(bank->chip.parent,
- "Couldn't add irqchip to gpiochip %d\n", ret);
- gpiochip_remove(&bank->chip);
- return -ENODEV;
+ "Could not register gpio chip %d\n", ret);
+ return ret;
}
- gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
-
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
0, dev_name(bank->chip.parent), bank);
if (ret)
gpiochip_remove(&bank->chip);
+ if (!bank->is_mpuio)
+ gpio += bank->width;
+
return ret;
}
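
The omap conversion above is the newer gpiolib idiom: rather than calling gpiochip_irqchip_add() and gpiochip_set_chained_irqchip() after registration, the driver fills in struct gpio_irq_chip up front and lets gpiochip_add_data() build the irqdomain. A reduced sketch of the pattern, limited to the fields the hunk uses; the driver names are hypothetical:

	#include <linux/gpio/driver.h>
	#include <linux/irq.h>

	struct my_gpio {
		struct gpio_chip gc;
		unsigned int irq;	/* parent interrupt line */
	};

	static int my_gpio_register(struct my_gpio *priv, struct irq_chip *irqc)
	{
		struct gpio_irq_chip *girq = &priv->gc.irq;

		girq->chip = irqc;
		girq->handler = handle_bad_irq;	/* replaced per line by irq_set_type() */
		girq->default_type = IRQ_TYPE_NONE;
		girq->num_parents = 1;
		girq->parents = &priv->irq;	/* must outlive the chip; lives in drvdata */
		girq->first = 0;		/* 0 requests a dynamic IRQ base */

		/* Registration now wires up the irqdomain as well. */
		return gpiochip_add_data(&priv->gc, priv);
	}

As in the hunk, the parent interrupt itself is still requested separately (omap keeps its devm_request_irq() call); filling girq only replaces the irqchip and chained-handler bookkeeping.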
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
new file mode 100644
index 000000000000..f666e2e69074
--- /dev/null
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GPIO driver for the ACCES PCIe-IDIO-24 family
+ * Copyright (C) 2018 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the following ACCES devices: PCIe-IDIO-24,
+ * PCIe-IDI-24, PCIe-IDO-24, and PCIe-IDIO-12.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * struct idio_24_gpio_reg - GPIO device registers structure
+ * @out0_7: Read: FET Outputs 0-7
+ * Write: FET Outputs 0-7
+ * @out8_15: Read: FET Outputs 8-15
+ * Write: FET Outputs 8-15
+ * @out16_23: Read: FET Outputs 16-23
+ * Write: FET Outputs 16-23
+ * @ttl_out0_7: Read: TTL/CMOS Outputs 0-7
+ * Write: TTL/CMOS Outputs 0-7
+ * @in0_7: Read: Isolated Inputs 0-7
+ * Write: Reserved
+ * @in8_15: Read: Isolated Inputs 8-15
+ * Write: Reserved
+ * @in16_23: Read: Isolated Inputs 16-23
+ * Write: Reserved
+ * @ttl_in0_7: Read: TTL/CMOS Inputs 0-7
+ * Write: Reserved
+ * @cos0_7: Read: COS Status Inputs 0-7
+ * Write: COS Clear Inputs 0-7
+ * @cos8_15: Read: COS Status Inputs 8-15
+ * Write: COS Clear Inputs 8-15
+ * @cos16_23: Read: COS Status Inputs 16-23
+ * Write: COS Clear Inputs 16-23
+ * @cos_ttl0_7: Read: COS Status TTL/CMOS 0-7
+ * Write: COS Clear TTL/CMOS 0-7
+ * @ctl: Read: Control Register
+ * Write: Control Register
+ * @reserved: Read: Reserved
+ * Write: Reserved
+ * @cos_enable: Read: COS Enable
+ * Write: COS Enable
+ * @soft_reset: Read: IRQ Output Pin Status
+ * Write: Software Board Reset
+ */
+struct idio_24_gpio_reg {
+ u8 out0_7;
+ u8 out8_15;
+ u8 out16_23;
+ u8 ttl_out0_7;
+ u8 in0_7;
+ u8 in8_15;
+ u8 in16_23;
+ u8 ttl_in0_7;
+ u8 cos0_7;
+ u8 cos8_15;
+ u8 cos16_23;
+ u8 cos_ttl0_7;
+ u8 ctl;
+ u8 reserved;
+ u8 cos_enable;
+ u8 soft_reset;
+};
+
+/**
+ * struct idio_24_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @reg: I/O address offset for the GPIO device registers
+ * @irq_mask: I/O bits affected by interrupts
+ */
+struct idio_24_gpio {
+ struct gpio_chip chip;
+ raw_spinlock_t lock;
+ struct idio_24_gpio_reg __iomem *reg;
+ unsigned long irq_mask;
+};
+
+static int idio_24_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* FET Outputs */
+ if (offset < 24)
+ return 0;
+
+ /* Isolated Inputs */
+ if (offset < 48)
+ return 1;
+
+ /* TTL/CMOS I/O */
+ /* OUT MODE = 1 when TTL/CMOS Output Mode is set */
+ return !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask);
+}
+
+static int idio_24_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Clear TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) & ~out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
+ return 0;
+}
+
+static int idio_24_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Set TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) | out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
+ chip->set(chip, offset, value);
+ return 0;
+}
+
+static int idio_24_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long offset_mask = BIT(offset % 8);
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* FET Outputs */
+ if (offset < 8)
+ return !!(ioread8(&idio24gpio->reg->out0_7) & offset_mask);
+
+ if (offset < 16)
+ return !!(ioread8(&idio24gpio->reg->out8_15) & offset_mask);
+
+ if (offset < 24)
+ return !!(ioread8(&idio24gpio->reg->out16_23) & offset_mask);
+
+ /* Isolated Inputs */
+ if (offset < 32)
+ return !!(ioread8(&idio24gpio->reg->in0_7) & offset_mask);
+
+ if (offset < 40)
+ return !!(ioread8(&idio24gpio->reg->in8_15) & offset_mask);
+
+ if (offset < 48)
+ return !!(ioread8(&idio24gpio->reg->in16_23) & offset_mask);
+
+ /* TTL/CMOS Outputs */
+ if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
+ return !!(ioread8(&idio24gpio->reg->ttl_out0_7) & offset_mask);
+
+ /* TTL/CMOS Inputs */
+ return !!(ioread8(&idio24gpio->reg->ttl_in0_7) & offset_mask);
+}
+
+static void idio_24_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+ void __iomem *base;
+ const unsigned int mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned int out_state;
+
+ /* Isolated Inputs */
+ if (offset > 23 && offset < 48)
+ return;
+
+ /* TTL/CMOS Inputs */
+ if (offset > 47 && !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask))
+ return;
+
+ /* TTL/CMOS Outputs */
+ if (offset > 47)
+ base = &idio24gpio->reg->ttl_out0_7;
+ /* FET Outputs */
+ else if (offset > 15)
+ base = &idio24gpio->reg->out16_23;
+ else if (offset > 7)
+ base = &idio24gpio->reg->out8_15;
+ else
+ base = &idio24gpio->reg->out0_7;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ if (value)
+ out_state = ioread8(base) | mask;
+ else
+ out_state = ioread8(base) & ~mask;
+
+ iowrite8(out_state, base);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_ack(struct irq_data *data)
+{
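+ /* nothing to ack here: COS status bits are cleared in idio_24_irq_handler() */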
+}
+
+static void idio_24_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ unsigned char new_irq_mask;
+ const unsigned long bank_offset = bit_offset / 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ idio24gpio->irq_mask &= BIT(bit_offset);
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset;
+
+ if (!new_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Disable Rising Edge detection */
+ cos_enable_state &= ~BIT(bank_offset);
+ /* Disable Falling Edge detection */
+ cos_enable_state &= ~BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned char prev_irq_mask;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ const unsigned long bank_offset = bit_offset / 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+ idio24gpio->irq_mask |= BIT(bit_offset);
+
+ if (!prev_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Enable Rising Edge detection */
+ cos_enable_state |= BIT(bank_offset);
+ /* Enable Falling Edge detection */
+ cos_enable_state |= BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static int idio_24_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* The only valid irq types are none and both-edges */
+ if (flow_type != IRQ_TYPE_NONE &&
+ (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip idio_24_irqchip = {
+ .name = "pcie-idio-24",
+ .irq_ack = idio_24_irq_ack,
+ .irq_mask = idio_24_irq_mask,
+ .irq_unmask = idio_24_irq_unmask,
+ .irq_set_type = idio_24_irq_set_type
+};
+
+static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
+{
+ struct idio_24_gpio *const idio24gpio = dev_id;
+ unsigned long irq_status;
+ struct gpio_chip *const chip = &idio24gpio->chip;
+ unsigned long irq_mask;
+ int gpio;
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Read Change-Of-State status */
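+ /* a single 32-bit read covers all four 8-bit COS banks at once */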
+ irq_status = ioread32(&idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+ /* Make sure our device generated IRQ */
+ if (!irq_status)
+ return IRQ_NONE;
+
+ /* Handle only unmasked IRQ */
+ irq_mask = idio24gpio->irq_mask & irq_status;
+
+ for_each_set_bit(gpio, &irq_mask, chip->ngpio - 24)
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
+ gpio + 24));
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Clear Change-Of-State status */
+ iowrite32(irq_status, &idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+ return IRQ_HANDLED;
+}
+
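+/* 24 FET outputs + 24 isolated inputs + 8 TTL/CMOS lines */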
+#define IDIO_24_NGPIO 56
+static const char *idio_24_names[IDIO_24_NGPIO] = {
+ "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
+ "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
+ "OUT16", "OUT17", "OUT18", "OUT19", "OUT20", "OUT21", "OUT22", "OUT23",
+ "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
+ "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15",
+ "IIN16", "IIN17", "IIN18", "IIN19", "IIN20", "IIN21", "IIN22", "IIN23",
+ "TTL0", "TTL1", "TTL2", "TTL3", "TTL4", "TTL5", "TTL6", "TTL7"
+};
+
+static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *const dev = &pdev->dev;
+ struct idio_24_gpio *idio24gpio;
+ int err;
+ const size_t pci_bar_index = 2;
+ const char *const name = pci_name(pdev);
+
+ idio24gpio = devm_kzalloc(dev, sizeof(*idio24gpio), GFP_KERNEL);
+ if (!idio24gpio)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device (%d)\n", err);
+ return err;
+ }
+
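+ /* map only BAR 2, which holds the device's GPIO register block */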
+ err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+ if (err) {
+ dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
+ return err;
+ }
+
+ idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
+
+ idio24gpio->chip.label = name;
+ idio24gpio->chip.parent = dev;
+ idio24gpio->chip.owner = THIS_MODULE;
+ idio24gpio->chip.base = -1;
+ idio24gpio->chip.ngpio = IDIO_24_NGPIO;
+ idio24gpio->chip.names = idio_24_names;
+ idio24gpio->chip.get_direction = idio_24_gpio_get_direction;
+ idio24gpio->chip.direction_input = idio_24_gpio_direction_input;
+ idio24gpio->chip.direction_output = idio_24_gpio_direction_output;
+ idio24gpio->chip.get = idio_24_gpio_get;
+ idio24gpio->chip.set = idio_24_gpio_set;
+
+ raw_spin_lock_init(&idio24gpio->lock);
+
+ /* Software board reset */
+ iowrite8(0, &idio24gpio->reg->soft_reset);
+
+ err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ err = gpiochip_irqchip_add(&idio24gpio->chip, &idio_24_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, pdev->irq, idio_24_irq_handler, IRQF_SHARED,
+ name, idio24gpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id idio_24_pci_dev_id[] = {
+ { PCI_DEVICE(0x494F, 0x0FD0) }, { PCI_DEVICE(0x494F, 0x0BD0) },
+ { PCI_DEVICE(0x494F, 0x07D0) }, { PCI_DEVICE(0x494F, 0x0FC0) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, idio_24_pci_dev_id);
+
+static struct pci_driver idio_24_driver = {
+ .name = "pcie-idio-24",
+ .id_table = idio_24_pci_dev_id,
+ .probe = idio_24_probe
+};
+
+module_pci_driver(idio_24_driver);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES PCIe-IDIO-24 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e6e5cca624a7..f8d7d1cd8488 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
};
int i, j;
+ /*
+ * STMPE1600: to be able to get an IRQ from the pins,
+ * a read must be done on the GPMR register, or a write
+ * to the GPSR or GPCR registers
+ */
+ if (stmpe->partnum == STMPE1600) {
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ }
+
for (i = 0; i < CACHE_NR_REGS; i++) {
/* STMPE801 and STMPE1600 don't have RE and FE registers */
if ((stmpe->partnum == STMPE801 ||
@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
- struct stmpe *stmpe = stmpe_gpio->stmpe;
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = BIT(offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
-
- /*
- * STMPE1600 workaround: to be able to get IRQ from pins,
- * a read must be done on GPMR register, or a write in
- * GPSR or GPCR registers
- */
- if (stmpe->partnum == STMPE1600)
- stmpe_reg_read(stmpe,
- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
}
static void stmpe_dbg_show_one(struct seq_file *s,
@@ -273,15 +273,21 @@ static void stmpe_dbg_show_one(struct seq_file *s,
u8 fall_reg;
u8 irqen_reg;
- char *edge_det_values[] = {"edge-inactive",
- "edge-asserted",
- "not-supported"};
- char *rise_values[] = {"no-rising-edge-detection",
- "rising-edge-detection",
- "not-supported"};
- char *fall_values[] = {"no-falling-edge-detection",
- "falling-edge-detection",
- "not-supported"};
+ static const char * const edge_det_values[] = {
+ "edge-inactive",
+ "edge-asserted",
+ "not-supported"
+ };
+ static const char * const rise_values[] = {
+ "no-rising-edge-detection",
+ "rising-edge-detection",
+ "not-supported"
+ };
+ static const char * const fall_values[] = {
+ "no-falling-edge-detection",
+ "falling-edge-detection",
+ "not-supported"
+ };
#define NOT_SUPPORTED_IDX 2
u8 edge_det = NOT_SUPPORTED_IDX;
u8 rise = NOT_SUPPORTED_IDX;
@@ -344,7 +350,7 @@ static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
for (i = 0; i < gc->ngpio; i++, gpio++) {
stmpe_dbg_show_one(s, gc, i, gpio);
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
}
}
@@ -426,12 +432,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
struct stmpe_gpio *stmpe_gpio;
- int ret;
- int irq = 0;
-
- irq = platform_get_irq(pdev, 0);
+ int ret, irq;
- stmpe_gpio = kzalloc(sizeof(struct stmpe_gpio), GFP_KERNEL);
+ stmpe_gpio = kzalloc(sizeof(*stmpe_gpio), GFP_KERNEL);
if (!stmpe_gpio)
return -ENOMEM;
@@ -453,6 +456,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
if (stmpe_gpio->norequest_mask)
stmpe_gpio->chip.irq.need_valid_mask = true;
+ irq = platform_get_irq(pdev, 0);
if (irq < 0)
dev_info(&pdev->dev,
"device configured in no-irq mode: "
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index b5adb79a631a..d16e9d4a129b 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -553,8 +553,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
0, 0, of_node_to_fwnode(dev->of_node),
&thunderx_gpio_irqd_ops, txgpio);
- if (!txgpio->irqd)
+ if (!txgpio->irqd) {
+ err = -ENOMEM;
goto out;
+ }
/* Push on irq_data and the domain for each line. */
for (i = 0; i < ngpio; i++) {
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
new file mode 100644
index 000000000000..7f8f5b02e31d
--- /dev/null
+++ b/drivers/gpio/gpio-winbond.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPIO interface for Winbond Super I/O chips
+ * Currently, only W83627UHG (Nuvoton NCT6627UD) is supported.
+ *
+ * Author: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/gpio/driver.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+
+#define WB_GPIO_DRIVER_NAME KBUILD_MODNAME
+
+#define WB_SIO_BASE 0x2e
+#define WB_SIO_BASE_HIGH 0x4e
+
+#define WB_SIO_EXT_ENTER_KEY 0x87
+#define WB_SIO_EXT_EXIT_KEY 0xaa
+
+/* global chip registers */
+
+#define WB_SIO_REG_LOGICAL 0x07
+
+#define WB_SIO_REG_CHIP_MSB 0x20
+#define WB_SIO_REG_CHIP_LSB 0x21
+
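+/*
+ * only the upper 12 bits of the chip ID are compared when matching;
+ * the low nibble likely holds the chip revision
+ */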
+#define WB_SIO_CHIP_ID_W83627UHG 0xa230
+#define WB_SIO_CHIP_ID_W83627UHG_MASK GENMASK(15, 4)
+
+#define WB_SIO_REG_DPD 0x22
+#define WB_SIO_REG_DPD_UARTA 4
+#define WB_SIO_REG_DPD_UARTB 5
+
+#define WB_SIO_REG_IDPD 0x23
+#define WB_SIO_REG_IDPD_UARTC 4
+#define WB_SIO_REG_IDPD_UARTD 5
+#define WB_SIO_REG_IDPD_UARTE 6
+#define WB_SIO_REG_IDPD_UARTF 7
+
+#define WB_SIO_REG_GLOBAL_OPT 0x24
+#define WB_SIO_REG_GO_ENFDC 1
+
+#define WB_SIO_REG_OVTGPIO3456 0x29
+#define WB_SIO_REG_OG3456_G3PP 3
+#define WB_SIO_REG_OG3456_G4PP 4
+#define WB_SIO_REG_OG3456_G5PP 5
+#define WB_SIO_REG_OG3456_G6PP 7
+
+#define WB_SIO_REG_I2C_PS 0x2a
+#define WB_SIO_REG_I2CPS_I2CFS 1
+
+#define WB_SIO_REG_GPIO1_MF 0x2c
+#define WB_SIO_REG_G1MF_G1PP 6
+#define WB_SIO_REG_G1MF_G2PP 7
+#define WB_SIO_REG_G1MF_FS_MASK GENMASK(1, 0)
+#define WB_SIO_REG_G1MF_FS_IR_OFF 0
+#define WB_SIO_REG_G1MF_FS_IR 1
+#define WB_SIO_REG_G1MF_FS_GPIO1 2
+#define WB_SIO_REG_G1MF_FS_UARTB 3
+
+/* not an actual device number, just a value meaning 'no device' */
+#define WB_SIO_DEV_NONE 0xff
+
+/* registers with offsets >= 0x30 are specific for a particular device */
+
+/* UART B logical device */
+#define WB_SIO_DEV_UARTB 0x03
+#define WB_SIO_UARTB_REG_ENABLE 0x30
+#define WB_SIO_UARTB_ENABLE_ON 0
+
+/* UART C logical device */
+#define WB_SIO_DEV_UARTC 0x06
+#define WB_SIO_UARTC_REG_ENABLE 0x30
+#define WB_SIO_UARTC_ENABLE_ON 0
+
+/* GPIO3, GPIO4 logical device */
+#define WB_SIO_DEV_GPIO34 0x07
+#define WB_SIO_GPIO34_REG_ENABLE 0x30
+#define WB_SIO_GPIO34_ENABLE_3 0
+#define WB_SIO_GPIO34_ENABLE_4 1
+#define WB_SIO_GPIO34_REG_IO3 0xe0
+#define WB_SIO_GPIO34_REG_DATA3 0xe1
+#define WB_SIO_GPIO34_REG_INV3 0xe2
+#define WB_SIO_GPIO34_REG_IO4 0xe4
+#define WB_SIO_GPIO34_REG_DATA4 0xe5
+#define WB_SIO_GPIO34_REG_INV4 0xe6
+
+/* WDTO, PLED, GPIO5, GPIO6 logical device */
+#define WB_SIO_DEV_WDGPIO56 0x08
+#define WB_SIO_WDGPIO56_REG_ENABLE 0x30
+#define WB_SIO_WDGPIO56_ENABLE_5 1
+#define WB_SIO_WDGPIO56_ENABLE_6 2
+#define WB_SIO_WDGPIO56_REG_IO5 0xe0
+#define WB_SIO_WDGPIO56_REG_DATA5 0xe1
+#define WB_SIO_WDGPIO56_REG_INV5 0xe2
+#define WB_SIO_WDGPIO56_REG_IO6 0xe4
+#define WB_SIO_WDGPIO56_REG_DATA6 0xe5
+#define WB_SIO_WDGPIO56_REG_INV6 0xe6
+
+/* GPIO1, GPIO2, SUSLED logical device */
+#define WB_SIO_DEV_GPIO12 0x09
+#define WB_SIO_GPIO12_REG_ENABLE 0x30
+#define WB_SIO_GPIO12_ENABLE_1 0
+#define WB_SIO_GPIO12_ENABLE_2 1
+#define WB_SIO_GPIO12_REG_IO1 0xe0
+#define WB_SIO_GPIO12_REG_DATA1 0xe1
+#define WB_SIO_GPIO12_REG_INV1 0xe2
+#define WB_SIO_GPIO12_REG_IO2 0xe4
+#define WB_SIO_GPIO12_REG_DATA2 0xe5
+#define WB_SIO_GPIO12_REG_INV2 0xe6
+
+/* UART D logical device */
+#define WB_SIO_DEV_UARTD 0x0d
+#define WB_SIO_UARTD_REG_ENABLE 0x30
+#define WB_SIO_UARTD_ENABLE_ON 0
+
+/* UART E logical device */
+#define WB_SIO_DEV_UARTE 0x0e
+#define WB_SIO_UARTE_REG_ENABLE 0x30
+#define WB_SIO_UARTE_ENABLE_ON 0
+
+/*
+ * for a description of what a particular field of this struct means,
+ * please see the description of the relevant module parameter at the
+ * bottom of this file
+ */
+struct winbond_gpio_params {
+ unsigned long base;
+ unsigned long gpios;
+ unsigned long ppgpios;
+ unsigned long odgpios;
+ bool pledgpio;
+ bool beepgpio;
+ bool i2cgpio;
+};
+
+static struct winbond_gpio_params params;
+
+static int winbond_sio_enter(unsigned long base)
+{
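+ /* the Super I/O index/data port pair may be shared, hence a muxed region */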
+ if (!request_muxed_region(base, 2, WB_GPIO_DRIVER_NAME))
+ return -EBUSY;
+
+ /*
+ * the datasheet says two successive writes of the "key" value are
+ * needed for the chip to enter the "Extended Function Mode"
+ */
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+
+ return 0;
+}
+
+static void winbond_sio_select_logical(unsigned long base, u8 dev)
+{
+ outb(WB_SIO_REG_LOGICAL, base);
+ outb(dev, base + 1);
+}
+
+static void winbond_sio_leave(unsigned long base)
+{
+ outb(WB_SIO_EXT_EXIT_KEY, base);
+
+ release_region(base, 2);
+}
+
+static void winbond_sio_reg_write(unsigned long base, u8 reg, u8 data)
+{
+ outb(reg, base);
+ outb(data, base + 1);
+}
+
+static u8 winbond_sio_reg_read(unsigned long base, u8 reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void winbond_sio_reg_bset(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val |= BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static void winbond_sio_reg_bclear(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val &= ~BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static bool winbond_sio_reg_btest(unsigned long base, u8 reg, u8 bit)
+{
+ return winbond_sio_reg_read(base, reg) & BIT(bit);
+}
+
+/**
+ * struct winbond_gpio_port_conflict - possibly conflicting device information
+ * @name: device name (NULL means no conflicting device defined)
+ * @dev: Super I/O logical device number where the testreg register
+ * is located (or WB_SIO_DEV_NONE - don't select any
+ * logical device)
+ * @testreg: register number where the testbit bit is located
+ * @testbit: index of a bit to check whether an actual conflict exists
+ * @warnonly: if set then a conflict isn't fatal (just warn about it),
+ * otherwise disable the particular GPIO port if a conflict
+ * is detected
+ */
+struct winbond_gpio_port_conflict {
+ const char *name;
+ u8 dev;
+ u8 testreg;
+ u8 testbit;
+ bool warnonly;
+};
+
+/**
+ * struct winbond_gpio_info - information about a particular GPIO port (device)
+ * @dev: Super I/O logical device number of the registers
+ * specified below
+ * @enablereg: port enable bit register number
+ * @enablebit: index of a port enable bit
+ * @outputreg: output driver mode bit register number
+ * @outputppbit: index of a push-pull output driver mode bit
+ * @ioreg: data direction register number
+ * @invreg: pin data inversion register number
+ * @datareg: pin data register number
+ * @conflict: description of a device that possibly conflicts with
+ * this port
+ */
+struct winbond_gpio_info {
+ u8 dev;
+ u8 enablereg;
+ u8 enablebit;
+ u8 outputreg;
+ u8 outputppbit;
+ u8 ioreg;
+ u8 invreg;
+ u8 datareg;
+ struct winbond_gpio_port_conflict conflict;
+};
+
+static const struct winbond_gpio_info winbond_gpio_infos[6] = {
+ { /* 0 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_1,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G1PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO1,
+ .invreg = WB_SIO_GPIO12_REG_INV1,
+ .datareg = WB_SIO_GPIO12_REG_DATA1,
+ .conflict = {
+ .name = "UARTB",
+ .dev = WB_SIO_DEV_UARTB,
+ .testreg = WB_SIO_UARTB_REG_ENABLE,
+ .testbit = WB_SIO_UARTB_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 1 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_2,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G2PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO2,
+ .invreg = WB_SIO_GPIO12_REG_INV2,
+ .datareg = WB_SIO_GPIO12_REG_DATA2
+ /* special conflict handling so doesn't use conflict data */
+ },
+ { /* 2 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_3,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G3PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO3,
+ .invreg = WB_SIO_GPIO34_REG_INV3,
+ .datareg = WB_SIO_GPIO34_REG_DATA3,
+ .conflict = {
+ .name = "UARTC",
+ .dev = WB_SIO_DEV_UARTC,
+ .testreg = WB_SIO_UARTC_REG_ENABLE,
+ .testbit = WB_SIO_UARTC_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 3 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_4,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G4PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO4,
+ .invreg = WB_SIO_GPIO34_REG_INV4,
+ .datareg = WB_SIO_GPIO34_REG_DATA4,
+ .conflict = {
+ .name = "UARTD",
+ .dev = WB_SIO_DEV_UARTD,
+ .testreg = WB_SIO_UARTD_REG_ENABLE,
+ .testbit = WB_SIO_UARTD_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 4 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_5,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G5PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO5,
+ .invreg = WB_SIO_WDGPIO56_REG_INV5,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA5,
+ .conflict = {
+ .name = "UARTE",
+ .dev = WB_SIO_DEV_UARTE,
+ .testreg = WB_SIO_UARTE_REG_ENABLE,
+ .testbit = WB_SIO_UARTE_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 5 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_6,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G6PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO6,
+ .invreg = WB_SIO_WDGPIO56_REG_INV6,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA6,
+ .conflict = {
+ .name = "FDC",
+ .dev = WB_SIO_DEV_NONE,
+ .testreg = WB_SIO_REG_GLOBAL_OPT,
+ .testbit = WB_SIO_REG_GO_ENFDC,
+ .warnonly = false
+ }
+ }
+};
+
+/* returns whether changing a pin is allowed */
+static bool winbond_gpio_get_info(unsigned int *gpio_num,
+ const struct winbond_gpio_info **info)
+{
+ bool allow_changing = true;
+ unsigned long i;
+
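+ /*
+ * each enabled port spans 8 pins; find the port (index i) that
+ * contains this GPIO number and reduce *gpio_num to the pin offset
+ * within it
+ */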
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG) {
+ if (*gpio_num < 8)
+ break;
+
+ *gpio_num -= 8;
+ }
+
+ *info = &winbond_gpio_infos[i];
+
+ /*
+ * GPIO2 (the second port) shares some pins with a basic PC
+ * functionality, which is very likely controlled by the firmware.
+ * Don't allow changing these pins by default.
+ */
+ if (i == 1) {
+ if (*gpio_num == 0 && !params.pledgpio)
+ allow_changing = false;
+ else if (*gpio_num == 1 && !params.beepgpio)
+ allow_changing = false;
+ else if ((*gpio_num == 5 || *gpio_num == 6) && !params.i2cgpio)
+ allow_changing = false;
+ }
+
+ return allow_changing;
+}
+
+static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ bool val;
+ int ret;
+
+ winbond_gpio_get_info(&offset, &info);
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ val = winbond_sio_reg_btest(*base, info->datareg, offset);
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ winbond_sio_leave(*base);
+
+ return val;
+}
+
+static int winbond_gpio_direction_in(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bset(*base, info->ioreg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static int winbond_gpio_direction_out(struct gpio_chip *gc,
+ unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bclear(*base, info->ioreg, offset);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return;
+
+ if (winbond_sio_enter(*base) != 0)
+ return;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+}
+
+static struct gpio_chip winbond_gpio_chip = {
+ .base = -1,
+ .label = WB_GPIO_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .can_sleep = true,
+ .get = winbond_gpio_get,
+ .direction_input = winbond_gpio_direction_in,
+ .set = winbond_gpio_set,
+ .direction_output = winbond_gpio_direction_out,
+};
+
+static void winbond_gpio_configure_port0_pins(unsigned long base)
+{
+ unsigned int val;
+
+ val = winbond_sio_reg_read(base, WB_SIO_REG_GPIO1_MF);
+ if ((val & WB_SIO_REG_G1MF_FS_MASK) == WB_SIO_REG_G1MF_FS_GPIO1)
+ return;
+
+ pr_warn("GPIO1 pins were connected to something else (%.2x), fixing\n",
+ val);
+
+ val &= ~WB_SIO_REG_G1MF_FS_MASK;
+ val |= WB_SIO_REG_G1MF_FS_GPIO1;
+
+ winbond_sio_reg_write(base, WB_SIO_REG_GPIO1_MF, val);
+}
+
+static void winbond_gpio_configure_port1_check_i2c(unsigned long base)
+{
+ params.i2cgpio = !winbond_sio_reg_btest(base, WB_SIO_REG_I2C_PS,
+ WB_SIO_REG_I2CPS_I2CFS);
+ if (!params.i2cgpio)
+ pr_warn("disabling GPIO2.5 and GPIO2.6 as I2C is enabled\n");
+}
+
+static bool winbond_gpio_configure_port(unsigned long base, unsigned int idx)
+{
+ const struct winbond_gpio_info *info = &winbond_gpio_infos[idx];
+ const struct winbond_gpio_port_conflict *conflict = &info->conflict;
+
+ /* is there a possible conflicting device defined? */
+ if (conflict->name != NULL) {
+ if (conflict->dev != WB_SIO_DEV_NONE)
+ winbond_sio_select_logical(base, conflict->dev);
+
+ if (winbond_sio_reg_btest(base, conflict->testreg,
+ conflict->testbit)) {
+ if (conflict->warnonly) {
+ pr_warn("enabled GPIO%u shares pins with active %s\n",
+ idx + 1, conflict->name);
+ } else {
+ pr_warn("disabling GPIO%u as %s is enabled\n",
+ idx + 1, conflict->name);
+ return false;
+ }
+ }
+ }
+
+ /* GPIO1 and GPIO2 need some (additional) special handling */
+ if (idx == 0)
+ winbond_gpio_configure_port0_pins(base);
+ else if (idx == 1)
+ winbond_gpio_configure_port1_check_i2c(base);
+
+ winbond_sio_select_logical(base, info->dev);
+
+ winbond_sio_reg_bset(base, info->enablereg, info->enablebit);
+
+ if (params.ppgpios & BIT(idx))
+ winbond_sio_reg_bset(base, info->outputreg,
+ info->outputppbit);
+ else if (params.odgpios & BIT(idx))
+ winbond_sio_reg_bclear(base, info->outputreg,
+ info->outputppbit);
+ else
+ pr_notice("GPIO%u pins are %s\n", idx + 1,
+ winbond_sio_reg_btest(base, info->outputreg,
+ info->outputppbit) ?
+ "push-pull" :
+ "open drain");
+
+ return true;
+}
+
+static int winbond_gpio_configure(unsigned long base)
+{
+ unsigned long i;
+
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG)
+ if (!winbond_gpio_configure_port(base, i))
+ __clear_bit(i, &params.gpios);
+
+ if (!params.gpios) {
+ pr_err("please use 'gpios' module parameter to select some active GPIO ports to enable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int winbond_gpio_check_chip(unsigned long base)
+{
+ int ret;
+ unsigned int chip;
+
+ ret = winbond_sio_enter(base);
+ if (ret)
+ return ret;
+
+ chip = winbond_sio_reg_read(base, WB_SIO_REG_CHIP_MSB) << 8;
+ chip |= winbond_sio_reg_read(base, WB_SIO_REG_CHIP_LSB);
+
+ pr_notice("chip ID at %lx is %.4x\n", base, chip);
+
+ if ((chip & WB_SIO_CHIP_ID_W83627UHG_MASK) !=
+ WB_SIO_CHIP_ID_W83627UHG) {
+ pr_err("not an our chip\n");
+ ret = -ENODEV;
+ }
+
+ winbond_sio_leave(base);
+
+ return ret;
+}
+
+static int winbond_gpio_imatch(struct device *dev, unsigned int id)
+{
+ unsigned long gpios_rem;
+ int ret;
+
+ gpios_rem = params.gpios & ~GENMASK(ARRAY_SIZE(winbond_gpio_infos) - 1,
+ 0);
+ if (gpios_rem) {
+ pr_warn("unknown ports (%lx) enabled in GPIO ports bitmask\n",
+ gpios_rem);
+ params.gpios &= ~gpios_rem;
+ }
+
+ if (params.ppgpios & params.odgpios) {
+ pr_err("some GPIO ports are set both to push-pull and open drain mode at the same time\n");
+ return 0;
+ }
+
+ if (params.base != 0)
+ return winbond_gpio_check_chip(params.base) == 0;
+
+ /*
+ * if the 'base' module parameter is unset, probe the two default chip
+ * I/O port bases
+ */
+ params.base = WB_SIO_BASE;
+ ret = winbond_gpio_check_chip(params.base);
+ if (ret == 0)
+ return 1;
+ if (ret != -ENODEV && ret != -EBUSY)
+ return 0;
+
+ params.base = WB_SIO_BASE_HIGH;
+ return winbond_gpio_check_chip(params.base) == 0;
+}
+
+static int winbond_gpio_iprobe(struct device *dev, unsigned int id)
+{
+ int ret;
+
+ if (params.base == 0)
+ return -EINVAL;
+
+ ret = winbond_sio_enter(params.base);
+ if (ret)
+ return ret;
+
+ ret = winbond_gpio_configure(params.base);
+
+ winbond_sio_leave(params.base);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Add 8 GPIOs for every port that was enabled via the gpios module
+ * parameter and wasn't disabled earlier in winbond_gpio_configure()
+ * due to, for example, a pin conflict.
+ */
+ winbond_gpio_chip.ngpio = hweight_long(params.gpios) * 8;
+
+ /*
+ * GPIO6 port has only 5 pins, so if it is enabled we have to adjust
+ * the total count appropriately
+ */
+ if (params.gpios & BIT(5))
+ winbond_gpio_chip.ngpio -= (8 - 5);
+
+ winbond_gpio_chip.parent = dev;
+
+ return devm_gpiochip_add_data(dev, &winbond_gpio_chip, &params.base);
+}
+
+static struct isa_driver winbond_gpio_idriver = {
+ .driver = {
+ .name = WB_GPIO_DRIVER_NAME,
+ },
+ .match = winbond_gpio_imatch,
+ .probe = winbond_gpio_iprobe,
+};
+
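+/* the second argument is the number of chip instances to probe (just one) */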
+module_isa_driver(winbond_gpio_idriver, 1);
+
+module_param_named(base, params.base, ulong, 0444);
+MODULE_PARM_DESC(base,
+ "I/O port base (when unset - probe chip default ones)");
+
+/* This parameter sets which GPIO devices (ports) we enable */
+module_param_named(gpios, params.gpios, ulong, 0444);
+MODULE_PARM_DESC(gpios,
+ "bitmask of GPIO ports to enable (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+/*
+ * These two parameters below set how the GPIO port output drivers are
+ * configured. It can't be one bitmask since we need three values per
+ * port: push-pull, open drain, and keep as-is (this is the default).
+ */
+module_param_named(ppgpios, params.ppgpios, ulong, 0444);
+MODULE_PARM_DESC(ppgpios,
+ "bitmask of GPIO ports to set to push-pull mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+module_param_named(odgpios, params.odgpios, ulong, 0444);
+MODULE_PARM_DESC(odgpios,
+ "bitmask of GPIO ports to set to open drain mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+/*
+ * GPIO2.0 and GPIO2.1 control basic PC functionality that we
+ * don't allow tinkering with by default (it is very likely that the
+ * firmware owns these pins).
+ * The two parameters below allow overriding these restrictions.
+ */
+module_param_named(pledgpio, params.pledgpio, bool, 0644);
+MODULE_PARM_DESC(pledgpio,
+ "enable changing value of GPIO2.0 bit (Power LED), default no.");
+
+module_param_named(beepgpio, params.beepgpio, bool, 0644);
+MODULE_PARM_DESC(beepgpio,
+ "enable changing value of GPIO2.1 bit (BEEP), default no.");
+
+MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
+MODULE_DESCRIPTION("GPIO interface for Winbond Super I/O chips");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d6f3d9ee1350..0ecffd172a80 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -414,7 +414,8 @@ EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
- struct acpi_reference_args *args)
+ struct acpi_reference_args *args,
+ unsigned int *quirks)
{
const struct acpi_gpio_mapping *gm;
@@ -430,6 +431,8 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
args->args[1] = par->line_index;
args->args[2] = par->active_low;
args->nargs = 3;
+
+ *quirks = gm->quirks;
return true;
}
@@ -461,8 +464,8 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
}
}
-int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+static int
+__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
{
int ret = 0;
@@ -489,12 +492,31 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
return ret;
}
+int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
+{
+ struct device *dev = &info->adev->dev;
+ enum gpiod_flags old = *flags;
+ int ret;
+
+ ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
+ if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
+ if (ret)
+ dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
+ } else {
+ if (ret)
+ dev_dbg(dev, "Override GPIO initialization flags\n");
+ *flags = old;
+ }
+
+ return ret;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
int pin_index;
bool active_low;
- struct acpi_device *adev;
struct gpio_desc *desc;
int n;
};
@@ -531,8 +553,8 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.triggering = agpio->triggering;
} else {
lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio);
+ lookup->info.polarity = lookup->active_low;
}
-
}
return 1;
@@ -541,12 +563,13 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
struct acpi_gpio_info *info)
{
+ struct acpi_device *adev = lookup->info.adev;
struct list_head res_list;
int ret;
INIT_LIST_HEAD(&res_list);
- ret = acpi_dev_get_resources(lookup->adev, &res_list,
+ ret = acpi_dev_get_resources(adev, &res_list,
acpi_populate_gpio_lookup,
lookup);
if (ret < 0)
@@ -557,11 +580,8 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
if (!lookup->desc)
return -ENOENT;
- if (info) {
+ if (info)
*info = lookup->info;
- if (lookup->active_low)
- info->polarity = lookup->active_low;
- }
return 0;
}
@@ -570,6 +590,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
struct acpi_gpio_lookup *lookup)
{
struct acpi_reference_args args;
+ unsigned int quirks = 0;
int ret;
memset(&args, 0, sizeof(args));
@@ -581,14 +602,14 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
if (!adev)
return ret;
- if (!acpi_get_driver_gpio_data(adev, propname, index, &args))
+ if (!acpi_get_driver_gpio_data(adev, propname, index, &args,
+ &quirks))
return ret;
}
/*
* The property was found and resolved, so need to lookup the GPIO based
* on returned args.
*/
- lookup->adev = args.adev;
if (args.nargs != 3)
return -EPROTO;
@@ -596,6 +617,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
lookup->pin_index = args.args[1];
lookup->active_low = !!args.args[2];
+ lookup->info.adev = args.adev;
+ lookup->info.quirks = quirks;
return 0;
}
@@ -643,11 +666,11 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
return ERR_PTR(ret);
dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %d %u\n",
- dev_name(&lookup.adev->dev), lookup.index,
+ dev_name(&lookup.info.adev->dev), lookup.index,
lookup.pin_index, lookup.active_low);
} else {
dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.adev = adev;
+ lookup.info.adev = adev;
}
ret = acpi_gpio_resource_lookup(&lookup, info);
@@ -664,7 +687,6 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
struct acpi_gpio_info info;
struct gpio_desc *desc;
char propname[32];
- int err;
int i;
/* Try first from _DSD */
@@ -703,10 +725,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
if (info.polarity == GPIO_ACTIVE_LOW)
*lookupflags |= GPIO_ACTIVE_LOW;
- err = acpi_gpio_update_gpiod_flags(dflags, info.flags);
- if (err)
- dev_dbg(dev, "Override GPIO initialization flags\n");
-
+ acpi_gpio_update_gpiod_flags(dflags, &info);
return desc;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 72a0695d2ac3..564bb7a31da4 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -56,6 +56,42 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
return gpiochip_get_desc(chip, ret);
}
+static void of_gpio_flags_quirks(struct device_node *np,
+ enum of_gpio_flags *flags)
+{
+ /*
+ * Some GPIO fixed regulator quirks.
+ * Note that active low is the default.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ (of_device_is_compatible(np, "reg-fixed-voltage") ||
+ of_device_is_compatible(np, "regulator-gpio"))) {
+ /*
+ * The regulator GPIO handles are specified such that the
+ * presence or absence of "enable-active-high" solely controls
+ * the polarity of the GPIO line. Any phandle flags must
+ * be actively ignored.
+ */
+ if (*flags & OF_GPIO_ACTIVE_LOW) {
+ pr_warn("%s GPIO handle specifies active low - ignored\n",
+ of_node_full_name(np));
+ *flags &= ~OF_GPIO_ACTIVE_LOW;
+ }
+ if (!of_property_read_bool(np, "enable-active-high"))
+ *flags |= OF_GPIO_ACTIVE_LOW;
+ }
+ /*
+ * Legacy open drain handling for fixed voltage regulators.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ of_device_is_compatible(np, "reg-fixed-voltage") &&
+ of_property_read_bool(np, "gpio-open-drain")) {
+ *flags |= (OF_GPIO_SINGLE_ENDED | OF_GPIO_OPEN_DRAIN);
+ pr_info("%s uses legacy open drain flag - update the DTS if you can\n",
+ of_node_full_name(np));
+ }
+}
+
/**
* of_get_named_gpiod_flags() - Get a GPIO descriptor and flags for GPIO API
* @np: device node to get GPIO from
@@ -93,6 +129,9 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
if (IS_ERR(desc))
goto out;
+ if (flags)
+ of_gpio_flags_quirks(np, flags);
+
pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
__func__, propname, np, index,
PTR_ERR_OR_ZERO(desc));
@@ -117,6 +156,71 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
+/*
+ * The SPI GPIO bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios" so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ char prop_name[32]; /* 32 is max size of property name */
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+
+ /*
+ * Hopefully the compiler stubs the rest of the function if this
+ * is false.
+ */
+ if (!IS_ENABLED(CONFIG_SPI_MASTER))
+ return ERR_PTR(-ENOENT);
+
+ /* Allow this specifically for "spi-gpio" devices */
+ if (!of_device_is_compatible(np, "spi-gpio") || !con_id)
+ return ERR_PTR(-ENOENT);
+
+ /* Will be "gpio-sck", "gpio-mosi" or "gpio-miso" */
+ snprintf(prop_name, sizeof(prop_name), "%s-%s", "gpio", con_id);
+
+ desc = of_get_named_gpiod_flags(np, prop_name, 0, of_flags);
+ return desc;
+}
+
+/*
+ * Some regulator bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios" so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ /* These are the connection IDs we accept as legacy GPIO phandles */
+ static const char * const whitelist[] = {
+ "wlf,ldoena", /* Arizona */
+ "wlf,ldo1ena", /* WM8994 */
+ "wlf,ldo2ena", /* WM8994 */
+ };
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_REGULATOR))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id)
+ return ERR_PTR(-ENOENT);
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++)
+ if (!strcmp(con_id, whitelist[i]))
+ break;
+
+ if (i == ARRAY_SIZE(whitelist))
+ return ERR_PTR(-ENOENT);
+
+ desc = of_get_named_gpiod_flags(np, con_id, 0, of_flags);
+ return desc;
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
@@ -126,6 +230,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
struct gpio_desc *desc;
unsigned int i;
+ /* Try GPIO property "foo-gpios" and "foo-gpio" */
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id)
snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
@@ -140,6 +245,14 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
break;
}
+ /* Special handling for SPI GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_spi_gpio(dev, con_id, &of_flags);
+
+ /* Special handling for regulator GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+
if (IS_ERR(desc))
return desc;
@@ -153,8 +266,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_OPEN_SOURCE;
}
- if (of_flags & OF_GPIO_SLEEP_MAY_LOSE_VALUE)
- *flags |= GPIO_SLEEP_MAY_LOSE_VALUE;
+ if (of_flags & OF_GPIO_TRANSITORY)
+ *flags |= GPIO_TRANSITORY;
return desc;
}
@@ -214,6 +327,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
if (xlate_flags & OF_GPIO_ACTIVE_LOW)
*lflags |= GPIO_ACTIVE_LOW;
+ if (xlate_flags & OF_GPIO_TRANSITORY)
+ *lflags |= GPIO_TRANSITORY;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 3f454eaf2101..3dbaf489a8a5 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
+#include <linux/ctype.h>
#include "gpiolib.h"
@@ -106,8 +107,14 @@ static ssize_t value_show(struct device *dev,
mutex_lock(&data->mutex);
- status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));
+ status = gpiod_get_value_cansleep(desc);
+ if (status < 0)
+ goto err;
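+ /* the value is a single digit, so write it and a newline directly */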
+ buf[0] = '0' + status;
+ buf[1] = '\n';
+ status = 2;
+err:
mutex_unlock(&data->mutex);
return status;
@@ -118,7 +125,7 @@ static ssize_t value_store(struct device *dev,
{
struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status = 0;
mutex_lock(&data->mutex);
@@ -127,7 +134,11 @@ static ssize_t value_store(struct device *dev,
} else {
long value;
- status = kstrtol(buf, 0, &value);
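+ /* fast path for single-digit writes such as "0\n" or "1\n" */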
+ if (size <= 2 && isdigit(buf[0]) &&
+ (size == 1 || buf[1] == '\n'))
+ value = buf[0] - '0';
+ else
+ status = kstrtol(buf, 0, &value);
if (status == 0) {
gpiod_set_value_cansleep(desc, value);
status = size;
@@ -138,7 +149,7 @@ static ssize_t value_store(struct device *dev,
return status;
}
-static DEVICE_ATTR_RW(value);
+static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
@@ -474,11 +485,15 @@ static ssize_t export_store(struct class *class,
status = -ENODEV;
goto done;
}
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
+
+ status = gpiod_set_transitory(desc, false);
+ if (!status) {
+ status = gpiod_export(desc, true);
+ if (status < 0)
+ gpiod_free(desc);
+ else
+ set_bit(FLAG_SYSFS, &desc->flags);
+ }
done:
if (status)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 14532d9576e4..36ca5064486e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- if (!desc || !desc->gdev || !desc->gdev->chip)
+ if (!desc || !desc->gdev)
return NULL;
return desc->gdev->chip;
}
@@ -196,7 +196,7 @@ static int gpiochip_find_base(int ngpio)
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
*
- * Return GPIOF_DIR_IN or GPIOF_DIR_OUT, or an error code in case of error.
+ * Returns 0 for output, 1 for input, or an error code in case of error.
*
* This function may sleep if gpiod_cansleep() is true.
*/
@@ -460,6 +460,15 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
return -EINVAL;
+ /*
+ * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+ * the hardware actually supports enabling both at the same time the
+ * electrical result would be disastrous.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
@@ -506,6 +515,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ ret = gpiod_set_transitory(desc, false);
+ if (ret < 0)
+ goto out_free_descs;
+
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -588,6 +601,9 @@ out_free_lh:
* @events: KFIFO for the GPIO events
* @read_lock: mutex lock to protect reads from colliding with adding
* new events to the FIFO
+ * @timestamp: cache for the timestamp storing it between hardirq
+ * and IRQ thread, used to bring the timestamp close to the actual
+ * event
*/
struct lineevent_state {
struct gpio_device *gdev;
@@ -598,17 +614,18 @@ struct lineevent_state {
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
struct mutex read_lock;
+ u64 timestamp;
};
#define GPIOEVENT_REQUEST_VALID_FLAGS \
(GPIOEVENT_REQUEST_RISING_EDGE | \
GPIOEVENT_REQUEST_FALLING_EDGE)
-static unsigned int lineevent_poll(struct file *filep,
+static __poll_t lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct lineevent_state *le = filep->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
poll_wait(filep, &le->wait, wait);
@@ -732,7 +749,10 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
struct gpioevent_data ge;
int ret, level;
- ge.timestamp = ktime_get_real_ns();
+ /* Do not leak kernel stack to userspace */
+ memset(&ge, 0, sizeof(ge));
+
+ ge.timestamp = le->timestamp;
level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
@@ -760,6 +780,19 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
return IRQ_HANDLED;
}
+static irqreturn_t lineevent_irq_handler(int irq, void *p)
+{
+ struct lineevent_state *le = p;
+
+ /*
+ * Just store the timestamp in hardirq context so we get it as
+ * close in time as possible to the actual event.
+ */
+ le->timestamp = ktime_get_real_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
struct gpioevent_request eventreq;
@@ -852,7 +885,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
/* Request a thread to read the events */
ret = request_threaded_irq(le->irq,
- NULL,
+ lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
le->label,
@@ -1050,7 +1083,7 @@ static void gpiodevice_release(struct device *dev)
list_del(&gdev->list);
ida_simple_remove(&gpio_ida, gdev->id);
- kfree(gdev->label);
+ kfree_const(gdev->label);
kfree(gdev->descs);
kfree(gdev);
}
@@ -1159,10 +1192,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_descs;
}
- if (chip->label)
- gdev->label = kstrdup(chip->label, GFP_KERNEL);
- else
- gdev->label = kstrdup("unknown", GFP_KERNEL);
+ gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
status = -ENOMEM;
goto err_free_descs;
@@ -1209,31 +1239,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct gpio_desc *desc = &gdev->descs[i];
desc->gdev = gdev;
- /*
- * REVISIT: most hardware initializes GPIOs as inputs
- * (often with pullups enabled) so power usage is
- * minimized. Linux code should set the gpio direction
- * first thing; but until it does, and in case
- * chip->get_direction is not set, we may expose the
- * wrong direction in sysfs.
- */
- if (chip->get_direction) {
- /*
- * If we have .get_direction, set up the initial
- * direction flag from the hardware.
- */
- int dir = chip->get_direction(chip, i);
-
- if (!dir)
- set_bit(FLAG_IS_OUT, &desc->flags);
- } else if (!chip->direction_input) {
- /*
- * If the chip lacks the .direction_input callback
- * we logically assume all lines are outputs.
- */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
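+ /* without a direction_input callback, assume all lines are outputs */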
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
#ifdef CONFIG_PINCTRL
@@ -1283,7 +1296,7 @@ err_remove_from_list:
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
- kfree(gdev->label);
+ kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
err_free_gdev:
@@ -1383,7 +1396,7 @@ static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_gpiochip_add_data() - Resource manager piochip_add_data()
+ * devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
* @dev: the device pointer on which irq_chip belongs to.
* @chip: the chip to register, with chip->base initialized
* @data: driver-private data associated with this chip
@@ -1510,14 +1523,15 @@ static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
gpiochip->irq.valid_mask = NULL;
}
-static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
- unsigned int offset)
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset)
{
/* No mask means all valid */
if (likely(!gpiochip->irq.valid_mask))
return true;
return test_bit(offset, gpiochip->irq.valid_mask);
}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
/**
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
@@ -2174,40 +2188,37 @@ done:
* macro to avoid endless duplication. If the desc is NULL it is an
* optional GPIO and calls should just bail out.
*/
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer)\n", func);
+ return PTR_ERR(desc);
+ }
+ if (!desc->gdev) {
+ pr_warn("%s: invalid GPIO (no device)\n", func);
+ return -EINVAL;
+ }
+ if (!desc->gdev->chip) {
+ dev_warn(&desc->gdev->dev,
+ "%s: backing chip is gone\n", func);
+ return 0;
+ }
+ return 1;
+}
+
#define VALIDATE_DESC(desc) do { \
- if (!desc) \
- return 0; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return PTR_ERR(desc); \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
- return -EINVAL; \
- } \
- if ( !desc->gdev->chip ) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
- return 0; \
- } } while (0)
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc) \
- return; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return; \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
- return; \
- } \
- if (!desc->gdev->chip) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
return; \
- } } while (0)
-
+ } while (0)
int gpiod_request(struct gpio_desc *desc, const char *label)
{
@@ -2456,7 +2467,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- struct gpio_chip *gc = desc->gdev->chip;
+ struct gpio_chip *gc;
int ret;
VALIDATE_DESC(desc);
@@ -2473,6 +2484,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
return -EIO;
}
+ gc = desc->gdev->chip;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
@@ -2530,6 +2542,50 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
/**
+ * gpiod_set_transitory - Lose or retain GPIO state on suspend or reset
+ * @desc: descriptor of the GPIO for which to configure persistence
+ * @transitory: True to lose state on suspend or reset, false for persistence
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code.
+ */
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ struct gpio_chip *chip;
+ unsigned long packed;
+ int gpio;
+ int rc;
+
+ VALIDATE_DESC(desc);
+ /*
+ * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * persistence state.
+ */
+ if (transitory)
+ set_bit(FLAG_TRANSITORY, &desc->flags);
+ else
+ clear_bit(FLAG_TRANSITORY, &desc->flags);
+
+ /* If the driver supports it, set the persistence state now */
+ chip = desc->gdev->chip;
+ if (!chip->set_config)
+ return 0;
+
+ packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
+ !transitory);
+ gpio = gpio_chip_hwgpio(desc);
+ rc = chip->set_config(chip, gpio, packed);
+ if (rc == -ENOTSUPP) {
+ dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
+ gpio);
+ return 0;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gpiod_set_transitory);
+
+/**
* gpiod_is_active_low - test whether a GPIO is active-low or not
* @desc: the gpio descriptor to test
*
@@ -3129,8 +3185,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return false;
- return !test_bit(FLAG_SLEEP_MAY_LOSE_VALUE,
- &chip->gpiodev->descs[offset].flags);
+ return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -3565,8 +3620,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if (lflags & GPIO_SLEEP_MAY_LOSE_VALUE)
- set_bit(FLAG_SLEEP_MAY_LOSE_VALUE, &desc->flags);
+
+ status = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
+ if (status < 0)
+ return status;
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
@@ -3606,6 +3663,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
struct gpio_desc *desc = NULL;
int status;
enum gpio_lookup_flags lookupflags = 0;
+ /* Maybe we have a device name, maybe not */
+ const char *devname = dev ? dev_name(dev) : "?";
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -3634,7 +3693,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- status = gpiod_request(desc, con_id);
+ /*
+ * If a connection label was passed use that, else attempt to use
+ * the device name as label
+ */
+ status = gpiod_request(desc, con_id ? con_id : devname);
if (status < 0)
return ERR_PTR(status);
@@ -3650,17 +3713,88 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
+ * gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags. If the node does not have the requested GPIO
+ * property, NULL is returned.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+ unsigned long lflags = 0;
+ enum of_gpio_flags flags;
+ bool active_low = false;
+ bool single_ended = false;
+ bool open_drain = false;
+ bool transitory = false;
+ int ret;
+
+ desc = of_get_named_gpiod_flags(node, propname,
+ index, &flags);
+
+ if (!desc || IS_ERR(desc)) {
+ /* If it is not there, just return NULL */
+ if (PTR_ERR(desc) == -ENOENT)
+ return NULL;
+ return desc;
+ }
+
+ active_low = flags & OF_GPIO_ACTIVE_LOW;
+ single_ended = flags & OF_GPIO_SINGLE_ENDED;
+ open_drain = flags & OF_GPIO_OPEN_DRAIN;
+ transitory = flags & OF_GPIO_TRANSITORY;
+
+ ret = gpiod_request(desc, label);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (active_low)
+ lflags |= GPIO_ACTIVE_LOW;
+
+ if (single_ended) {
+ if (open_drain)
+ lflags |= GPIO_OPEN_DRAIN;
+ else
+ lflags |= GPIO_OPEN_SOURCE;
+ }
+
+ if (transitory)
+ lflags |= GPIO_TRANSITORY;
+
+ ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ if (ret < 0) {
+ gpiod_put(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL(gpiod_get_from_of_node);
+
+/**
* fwnode_get_named_gpiod - obtain a GPIO from firmware node
* @fwnode: handle of the firmware node
* @propname: name of the firmware property representing the GPIO
- * @index: index of the GPIO to obtain in the consumer
+ * @index: index of the GPIO to obtain for the consumer
* @dflags: GPIO initialization flags
* @label: label to attach to the requested GPIO
*
* This function can be used for drivers that get their configuration
- * from firmware.
+ * from opaque firmware.
*
- * Function properly finds the corresponding GPIO using whatever is the
+ * The function properly finds the corresponding GPIO using whatever is the
* underlying firmware interface and then makes sure that the GPIO
* descriptor is requested before it is returned to the caller.
*
@@ -3677,53 +3811,35 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
{
struct gpio_desc *desc = ERR_PTR(-ENODEV);
unsigned long lflags = 0;
- bool active_low = false;
- bool single_ended = false;
- bool open_drain = false;
int ret;
if (!fwnode)
return ERR_PTR(-EINVAL);
if (is_of_node(fwnode)) {
- enum of_gpio_flags flags;
-
- desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname,
- index, &flags);
- if (!IS_ERR(desc)) {
- active_low = flags & OF_GPIO_ACTIVE_LOW;
- single_ended = flags & OF_GPIO_SINGLE_ENDED;
- open_drain = flags & OF_GPIO_OPEN_DRAIN;
- }
+ desc = gpiod_get_from_of_node(to_of_node(fwnode),
+ propname, index,
+ dflags,
+ label);
+ return desc;
} else if (is_acpi_node(fwnode)) {
struct acpi_gpio_info info;
desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
- if (!IS_ERR(desc)) {
- active_low = info.polarity == GPIO_ACTIVE_LOW;
- ret = acpi_gpio_update_gpiod_flags(&dflags, info.flags);
- if (ret)
- pr_debug("Override GPIO initialization flags\n");
- }
- }
+ if (IS_ERR(desc))
+ return desc;
- if (IS_ERR(desc))
- return desc;
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+ if (info.polarity == GPIO_ACTIVE_LOW)
+ lflags |= GPIO_ACTIVE_LOW;
+ }
+
+ /* Currently only ACPI takes this path */
ret = gpiod_request(desc, label);
if (ret)
return ERR_PTR(ret);
- if (active_low)
- lflags |= GPIO_ACTIVE_LOW;
-
- if (single_ended) {
- if (open_drain)
- lflags |= GPIO_OPEN_DRAIN;
- else
- lflags |= GPIO_OPEN_SOURCE;
- }
-
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 6c44d1652139..b17ec6795c81 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -58,7 +58,7 @@ struct gpio_device {
struct gpio_desc *descs;
int base;
u16 ngpio;
- char *label;
+ const char *label;
void *data;
struct list_head list;
@@ -75,16 +75,20 @@ struct gpio_device {
/**
* struct acpi_gpio_info - ACPI GPIO specific information
+ * @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
struct acpi_gpio_info {
+ struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
int polarity;
int triggering;
+ unsigned int quirks;
};
/* gpio suffixes used for ACPI and device tree lookup */
@@ -124,7 +128,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
- enum gpiod_flags update);
+ struct acpi_gpio_info *info);
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
@@ -149,7 +153,7 @@ static inline void
acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
static inline int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
{
return 0;
}
@@ -189,6 +193,12 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_desc **desc_array,
int *value_array);
+/* This is just passed between gpiolib and devres */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
+
extern struct spinlock gpio_lock;
extern struct list_head gpio_devices;
@@ -205,7 +215,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_SLEEP_MAY_LOSE_VALUE 12 /* GPIO may lose value in sleep */
+#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
/* Connection label */
const char *label;
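FLAG_TRANSITORY generalises the old sleep-only flag: the line's value may be lost across suspend or controller reset, and gpiolib need not preserve it. The consumer-facing knob from the same rework is gpiod_set_transitory() (an assumption here; it is not shown in this diff):

    #include <linux/gpio/consumer.h>

    /* Hypothetical: mark a status LED whose state need not survive
     * suspend or reset, so no preservation work is attempted. */
    static int mark_led_transitory(struct gpio_desc *led)
    {
            return gpiod_set_transitory(led, true);
    }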
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b3c6e997ccdb..9a17bd3639d1 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -559,10 +559,10 @@ EXPORT_SYMBOL(drm_read);
*
* Mask of POLL flags indicating the current status of the file.
*/
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file_priv->event_wait, wait);
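The unsigned int -> __poll_t switch lets sparse verify that poll handlers traffic only in EPOLL* bits. A self-contained sketch of a handler in the new style (device struct and names hypothetical):

    #include <linux/poll.h>
    #include <linux/wait.h>

    struct demo_dev {
            wait_queue_head_t waitq;
            bool data_ready;
    };

    static __poll_t demo_poll(struct file *filp, struct poll_table_struct *wait)
    {
            struct demo_dev *dev = filp->private_data;
            __poll_t mask = 0;

            poll_wait(filp, &dev->waitq, wait);
            if (dev->data_ready)
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }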
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 59ee808f8fd9..d453756ca128 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2331,12 +2331,12 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
+static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- unsigned int events = 0;
+ __poll_t events = 0;
stream->ops->poll_wait(stream, file, wait);
@@ -2365,11 +2365,11 @@ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
+static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
struct drm_i915_private *dev_priv = stream->dev_priv;
- int ret;
+ __poll_t ret;
mutex_lock(&dev_priv->perf.lock);
ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fdc56c1c953..b9bfa806d346 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -982,25 +982,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
- x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL)
- return -ENOMEM;
- y = kmalloc(ybuf_size, GFP_KERNEL);
- if (y == NULL) {
- kfree(x);
- return -ENOMEM;
- }
- if (copy_from_user(x, depth->x, xbuf_size)) {
- kfree(x);
- kfree(y);
- return -EFAULT;
- }
- if (copy_from_user(y, depth->y, xbuf_size)) {
+ x = memdup_user(depth->x, xbuf_size);
+ if (IS_ERR(x))
+ return PTR_ERR(x);
+ y = memdup_user(depth->y, ybuf_size);
+ if (IS_ERR(y)) {
kfree(x);
- kfree(y);
- return -EFAULT;
+ return PTR_ERR(y);
}
-
buffer_size = depth->n * sizeof(u32);
buffer = memdup_user(depth->buffer, buffer_size);
if (IS_ERR(buffer)) {
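memdup_user() collapses the kmalloc()+copy_from_user()+unwind boilerplate and, incidentally, tidies the inconsistent use of xbuf_size when copying the y array above. The pattern on its own (helper and names hypothetical):

    #include <linux/err.h>
    #include <linux/string.h>

    /* duplicate a user-space int array into a kernel buffer */
    static int copy_coords(const int __user *uptr, size_t count, int **out)
    {
            int *buf = memdup_user(uptr, count * sizeof(*buf));

            if (IS_ERR(buf))
                    return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */
            *out = buf;                     /* caller kfree()s */
            return 0;
    }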
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7e5f30e234b1..d08753e8fd94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -713,7 +713,7 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern unsigned int vmw_fops_poll(struct file *filp,
+extern __poll_t vmw_fops_poll(struct file *filp,
struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 01be355525e4..67f844678ac8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -412,7 +412,7 @@ out_clips:
* Wrapper around the drm_poll function that makes sure the device is
* processing the fifo if drm_poll decides to wait.
*/
-unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv =
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index d35d6d271f3f..dfd8d0048980 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1266,7 +1266,7 @@ done:
return ret_val;
}
-static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
+static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait)
{
pr_debug("%s\n", __func__);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 779c5ae47f36..19c499f5623d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -280,6 +280,7 @@ config HID_ELECOM
---help---
Support for ELECOM devices:
- BM084 Bluetooth Mouse
+ - EX-G Trackball (Wired and wireless)
- DEFT Trackball (Wired and wireless)
- HUGE Trackball (Wired and wireless)
@@ -396,6 +397,17 @@ config HID_ITE
---help---
Support for ITE devices not fully compliant with HID standard.
+config HID_JABRA
+ tristate "Jabra USB HID Driver"
+ depends on HID
+ ---help---
+ Support for Jabra USB HID devices.
+
+ Prevents mapping of vendor defined HID usages to input events. Without
+ this driver, HID reports from Jabra devices may incorrectly be seen as
+ mouse button events.
+ Say M here if you may ever plug in a Jabra USB device.
+
config HID_TWINHAN
tristate "Twinhan IR remote control"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 235bd2a7b333..eb13b9e92d85 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for the HID driver
#
-hid-y := hid-core.o hid-input.o
+hid-y := hid-core.o hid-input.o hid-quirks.o
hid-$(CONFIG_DEBUG_FS) += hid-debug.o
obj-$(CONFIG_HID) += hid.o
@@ -52,6 +52,7 @@ obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_ICADE) += hid-icade.o
obj-$(CONFIG_HID_ITE) += hid-ite.o
+obj-$(CONFIG_HID_JABRA) += hid-jabra.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 1bb7b63b3150..88b9703318e4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -26,6 +26,7 @@
* any later version.
*/
+#include <linux/dmi.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/input/mt.h>
@@ -119,6 +120,24 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.max_contacts = 5,
};
+static const struct asus_touchpad_info asus_t100ha_tp = {
+ .max_x = 2640,
+ .max_y = 1320,
+ .res_x = 30, /* units/mm */
+ .res_y = 29, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
+static const struct asus_touchpad_info asus_t200ta_tp = {
+ .max_x = 3120,
+ .max_y = 1716,
+ .res_x = 30, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
static const struct asus_touchpad_info asus_t100chi_tp = {
.max_x = 2640,
.max_y = 1320,
@@ -606,7 +625,17 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
drvdata->quirks = QUIRK_SKIP_INPUT_MAPPING;
- drvdata->tp = &asus_t100ta_tp;
+ /*
+ * The T100HA uses the same USB-ids as the T100TAF and
+ * the T200TA uses the same USB-ids as the T100TA, while
+ * both have different max x/y values than the T100TA[F].
+ */
+ if (dmi_match(DMI_PRODUCT_NAME, "T100HAN"))
+ drvdata->tp = &asus_t100ha_tp;
+ else if (dmi_match(DMI_PRODUCT_NAME, "T200TA"))
+ drvdata->tp = &asus_t200ta_tp;
+ else
+ drvdata->tp = &asus_t100ta_tp;
}
}
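When distinct models reuse USB IDs, DMI is the remaining discriminator; dmi_match() compares one firmware-supplied field against an exact string. The selection above, reduced to its bare pattern (helper hypothetical, tables from the diff):

    #include <linux/dmi.h>

    static const struct asus_touchpad_info *pick_touchpad(void)
    {
            if (dmi_match(DMI_PRODUCT_NAME, "T100HAN"))
                    return &asus_t100ha_tp;
            if (dmi_match(DMI_PRODUCT_NAME, "T200TA"))
                    return &asus_t200ta_tp;
            return &asus_t100ta_tp;         /* T100TA/T100TAF default */
    }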
@@ -686,9 +715,10 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
- /* For the T100TA keyboard dock */
+ /* For the T100TA/T200TA keyboard dock */
if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
- *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+ (*rsize == 76 || *rsize == 101) &&
+ rdesc[73] == 0x81 && rdesc[74] == 0x01) {
hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
}
@@ -751,7 +781,10 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+ USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
+ QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c3f608131cf..c2560aae5542 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -830,31 +830,6 @@ static int hid_scan_report(struct hid_device *hid)
break;
}
- /* fall back to generic driver in case specific driver doesn't exist */
- switch (hid->group) {
- case HID_GROUP_MULTITOUCH_WIN_8:
- /* fall-through */
- case HID_GROUP_MULTITOUCH:
- if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_SENSOR_HUB:
- if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_RMI:
- if (!IS_ENABLED(CONFIG_HID_RMI))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_WACOM:
- if (!IS_ENABLED(CONFIG_HID_WACOM))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_LOGITECH_DJ_DEVICE:
- if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
- hid->group = HID_GROUP_GENERIC;
- break;
- }
vfree(parser);
return 0;
}
@@ -1597,8 +1572,8 @@ unlock:
}
EXPORT_SYMBOL_GPL(hid_input_report);
-static bool hid_match_one_id(struct hid_device *hdev,
- const struct hid_device_id *id)
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id)
{
return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
@@ -1606,7 +1581,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
(id->product == HID_ANY_ID || id->product == hdev->product);
}
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
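hid_match_one_id() treats HID_BUS_ANY, HID_GROUP_ANY and HID_ANY_ID as wildcards, so table entries can be as narrow or broad as needed. A sketch exercising both, with a made-up vendor ID:

    static const struct hid_device_id demo_ids[] = {
            /* exact USB vendor:product match */
            { HID_USB_DEVICE(0x1234, 0x5678) },
            /* any product from that vendor, on any bus or group */
            { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, 0x1234, HID_ANY_ID) },
            { }     /* terminator */
    };

    /* hid_match_id(hdev, demo_ids) returns the first matching entry,
     * or NULL if none matches. */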
@@ -1862,541 +1837,6 @@ void hid_hw_close(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_hw_close);
-/*
- * A list of devices for which there is a specialized driver on HID bus.
- *
- * Please note that for multitouch devices (driven by hid-multitouch driver),
- * there is a proper autodetection and autoloading in place (based on presence
- * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
- * as we are doing the right thing in hid_scan_usage().
- *
- * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
- * physical is found inside a usage page of type sensor, hid-sensor-hub will be
- * used as a driver. See hid_scan_report().
- */
-static const struct hid_device_id hid_have_special_driver[] = {
-#if IS_ENABLED(CONFIG_HID_A4TECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACRUX)
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ALPS)
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLE)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLEIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ASUS)
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_AUREAL)
- { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BELKIN)
- { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BETOP_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHERRY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHICONY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CMEDIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CORSAIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CP2112)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CYPRESS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELECOM)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELO)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EMS_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EZKEY)
- { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GEMBIRD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GFRM)
- { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
- { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GREENASIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GT683R)
- { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GYRATION)
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
-#endif
-#if IS_ENABLED(CONFIG_HID_HOLTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ICADE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ITE)
- { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KENSINGTON)
- { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KYE)
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LCPOWER)
- { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LED)
- { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LENOVO)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MICROSOFT)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MONTEREY)
- { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WIIMOTE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTI)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTRIG)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ORTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PENMOUNT)
- { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PETALYNX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PICOLCD)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRIMAX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RETRODE)
- { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RMI)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ROCCAT)
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAITEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAMSUNG)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SONY)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
- { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_STEELSERIES)
- { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SUNPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
-#endif
-#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TIVO)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TOPSEED)
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TWINHAN)
- { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UCLOGIC)
- { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
- { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WALTOP)
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
-#endif
-#if IS_ENABLED(CONFIG_HID_XINMO)
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZYDACRON)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
-#endif
- { }
-};
-
struct hid_dynid {
struct list_head list;
struct hid_device_id id;
@@ -2463,8 +1903,8 @@ static void hid_free_dynids(struct hid_driver *hdrv)
spin_unlock(&hdrv->dyn_lock);
}
-static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
- struct hid_driver *hdrv)
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv)
{
struct hid_dynid *dynid;
@@ -2479,6 +1919,7 @@ static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
return hid_match_id(hdev, hdrv->id_table);
}
+EXPORT_SYMBOL_GPL(hid_match_device);
static int hid_bus_match(struct device *dev, struct device_driver *drv)
{
@@ -2508,6 +1949,23 @@ static int hid_device_probe(struct device *dev)
goto unlock;
}
+ if (hdrv->match) {
+ if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ } else {
+ /*
+ * hid-generic implements .match(), so if
+ * hid_ignore_special_drivers is set, we can safely
+ * return.
+ */
+ if (hid_ignore_special_drivers) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ }
+
hdev->driver = hdrv;
if (hdrv->probe) {
ret = hdrv->probe(hdev, id);
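The probe path now defers to a driver-supplied match() callback when one exists; in this series hid-generic implements the hook so it can step aside for specialised drivers. A sketch of what a specialised driver's callback might look like (vendor check hypothetical):

    static bool demo_match(struct hid_device *hdev, bool ignore_special_driver)
    {
            /* honour the user's request for hid-generic everywhere */
            if (ignore_special_driver)
                    return false;
            /* otherwise claim only devices this driver really handles */
            return hdev->vendor == 0x1234;
    }

    static struct hid_driver demo_driver = {
            .name  = "demo",
            .match = demo_match,
            /* .id_table, .probe, ... */
    };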
@@ -2604,7 +2062,7 @@ static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static struct bus_type hid_bus_type = {
+struct bus_type hid_bus_type = {
.name = "hid",
.dev_groups = hid_dev_groups,
.drv_groups = hid_drv_groups,
@@ -2613,315 +2071,7 @@ static struct bus_type hid_bus_type = {
.remove = hid_device_remove,
.uevent = hid_uevent,
};
-
-/* a list of devices that shouldn't be handled by HID core at all */
-static const struct hid_device_id hid_ignore_list[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
- { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
- { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
-#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
-#endif
- { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
- { }
-};
-
-/**
- * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
- *
- * There are composite devices for which we want to ignore only a certain
- * interface. This is a list of devices for which only the mouse interface will
- * be ignored. This allows a dedicated driver to take care of the interface.
- */
-static const struct hid_device_id hid_mouse_ignore_list[] = {
- /* appletouch driver */
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
- { }
-};
-
-bool hid_ignore(struct hid_device *hdev)
-{
- if (hdev->quirks & HID_QUIRK_NO_IGNORE)
- return false;
- if (hdev->quirks & HID_QUIRK_IGNORE)
- return true;
-
- switch (hdev->vendor) {
- case USB_VENDOR_ID_CODEMERCS:
- /* ignore all Code Mercenaries IOWarrior devices */
- if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
- hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
- return true;
- break;
- case USB_VENDOR_ID_LOGITECH:
- if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
- hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
- return true;
- /*
- * The Keene FM transmitter USB device has the same USB ID as
- * the Logitech AudioHub Speaker, but it should ignore the hid.
- * Check if the name is that of the Keene device.
- * For reference: the name of the AudioHub is
- * "HOLTEK AudioHub Speaker".
- */
- if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
- !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
- return true;
- break;
- case USB_VENDOR_ID_SOUNDGRAPH:
- if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
- hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
- return true;
- break;
- case USB_VENDOR_ID_HANWANG:
- if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
- hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
- return true;
- break;
- case USB_VENDOR_ID_JESS:
- if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
- hdev->type == HID_TYPE_USBNONE)
- return true;
- break;
- case USB_VENDOR_ID_VELLEMAN:
- /* These are not HID devices. They are handled by comedi. */
- if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
- (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
- return true;
- break;
- case USB_VENDOR_ID_ATMEL_V_USB:
- /* Masterkit MA901 usb radio based on Atmel tiny85 chip and
- * it has the same USB ID as many Atmel V-USB devices. This
- * usb radio is handled by radio-ma901.c driver so we want
- * ignore the hid. Check the name, bus, product and ignore
- * if we have MA901 usb radio.
- */
- if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
- hdev->bus == BUS_USB &&
- strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
- return true;
- break;
- }
-
- if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
- return true;
-
- return !!hid_match_id(hdev, hid_ignore_list);
-}
-EXPORT_SYMBOL_GPL(hid_ignore);
+EXPORT_SYMBOL(hid_bus_type);
int hid_add_device(struct hid_device *hdev)
{
@@ -2931,6 +2081,8 @@ int hid_add_device(struct hid_device *hdev)
if (WARN_ON(hdev->status & HID_STAT_ADDED))
return -EBUSY;
+ hdev->quirks = hid_lookup_quirk(hdev);
+
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
if (hid_ignore(hdev))
@@ -2960,7 +2112,7 @@ int hid_add_device(struct hid_device *hdev)
if (hid_ignore_special_drivers) {
hdev->group = HID_GROUP_GENERIC;
} else if (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver)) {
+ !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
@@ -3044,6 +2196,29 @@ void hid_destroy_device(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_destroy_device);
+
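+/*
+ * Called once per registered HID driver when a new driver joins the bus;
+ * forwards the new driver to each existing driver's bus_add_driver()
+ * callback (hid-generic uses this to release devices the new driver wants).
+ */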
+static int __bus_add_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *added_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_add_driver)
+ hdrv->bus_add_driver(added_hdrv);
+
+ return 0;
+}
+
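+/*
+ * Called once per registered HID driver when a driver leaves the bus;
+ * forwards the removed driver to each remaining driver's
+ * bus_removed_driver() callback (hid-generic uses this to re-attach
+ * devices left without a driver).
+ */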
+static int __bus_removed_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *removed_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_removed_driver)
+ hdrv->bus_removed_driver(removed_hdrv);
+
+ return 0;
+}
+
int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
const char *mod_name)
{
@@ -3055,6 +2230,8 @@ int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
INIT_LIST_HEAD(&hdrv->dyn_list);
spin_lock_init(&hdrv->dyn_lock);
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_add_driver);
+
return driver_register(&hdrv->driver);
}
EXPORT_SYMBOL_GPL(__hid_register_driver);
@@ -3063,6 +2240,8 @@ void hid_unregister_driver(struct hid_driver *hdrv)
{
driver_unregister(&hdrv->driver);
hid_free_dynids(hdrv);
+
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);
@@ -3117,6 +2296,7 @@ static void __exit hid_exit(void)
hid_debug_exit();
hidraw_exit();
bus_unregister(&hid_bus_type);
+ hid_quirks_exit(HID_BUS_ANY);
}
module_init(hid_init);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 5271db593478..c783fd5ef809 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1179,7 +1179,7 @@ out:
return ret;
}
-static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
+static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
{
struct hid_debug_list *list = file->private_data;
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 54aeea57d209..1a1ecc491c02 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -1,9 +1,15 @@
/*
- * HID driver for ELECOM devices.
+ * HID driver for ELECOM devices:
+ * - BM084 Bluetooth Mouse
+ * - EX-G Trackball (Wired and wireless)
+ * - DEFT Trackball (Wired and wireless)
+ * - HUGE Trackball (Wired and wireless)
+ *
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
+ * Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
*/
/*
@@ -19,6 +25,34 @@
#include "hid-ids.h"
+/*
+ * Certain ELECOM mice misreport their button count, meaning that they only
+ * work correctly with the ELECOM mouse assistant software, which is
+ * unavailable for Linux. Four extra INPUT reports and a FEATURE report are
+ * described by the report descriptor, but it does not appear that these
+ * enable software to control what the extra buttons map to. The only simple
+ * and straightforward solution seems to be fixing up the report descriptor.
+ *
+ * Report descriptor format:
+ * Positions 13, 15, 21 and 31 store the button report count, the per-button
+ * report size (1 bit, only checked), the button usage maximum and the
+ * padding report size respectively.
+ */
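+/*
+ * As a worked example, take the DEFT/HUGE byte values documented in the
+ * comment this patch removes (Report Count (5), Usage Maximum (05h),
+ * 3 bits of padding): mouse_button_fixup(hdev, rdesc, rsize, 8) rewrites
+ *
+ *   rdesc[12..13]  0x95 0x05  Report Count (5)    ->  0x95 0x08
+ *   rdesc[14..15]  0x75 0x01  Report Size (1)         (checked only)
+ *   rdesc[20..21]  0x29 0x05  Usage Maximum (05h) ->  0x29 0x08
+ *   rdesc[30..31]  0x75 0x03  Report Size (3)     ->  0x75 0x00
+ *
+ * i.e. eight 1-bit button fields and a zero-width padding field.
+ */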
+#define MOUSE_BUTTONS_MAX 8
+static void mouse_button_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int rsize,
+ int nbuttons)
+{
+ if (rsize < 32 || rdesc[12] != 0x95 ||
+ rdesc[14] != 0x75 || rdesc[15] != 0x01 ||
+ rdesc[20] != 0x29 || rdesc[30] != 0x75)
+ return;
+ hid_info(hdev, "Fixing up Elecom mouse button count\n");
+ nbuttons = clamp(nbuttons, 0, MOUSE_BUTTONS_MAX);
+ rdesc[13] = nbuttons;
+ rdesc[21] = nbuttons;
+ rdesc[31] = MOUSE_BUTTONS_MAX - nbuttons;
+}
+
static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -31,45 +65,15 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[47] = 0x00;
}
break;
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRED:
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRELESS:
+ mouse_button_fixup(hdev, rdesc, *rsize, 6);
+ break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
- /* The DEFT/HUGE trackball has eight buttons, but its descriptor
- * only reports five, disabling the three Fn buttons on the top
- * of the mouse.
- *
- * Apply the following diff to the descriptor:
- *
- * Collection (Physical), Collection (Physical),
- * Report ID (1), Report ID (1),
- * Report Count (5), -> Report Count (8),
- * Report Size (1), Report Size (1),
- * Usage Page (Button), Usage Page (Button),
- * Usage Minimum (01h), Usage Minimum (01h),
- * Usage Maximum (05h), -> Usage Maximum (08h),
- * Logical Minimum (0), Logical Minimum (0),
- * Logical Maximum (1), Logical Maximum (1),
- * Input (Variable), Input (Variable),
- * Report Count (1), -> Report Count (0),
- * Report Size (3), Report Size (3),
- * Input (Constant), Input (Constant),
- * Report Size (16), Report Size (16),
- * Report Count (2), Report Count (2),
- * Usage Page (Desktop), Usage Page (Desktop),
- * Usage (X), Usage (X),
- * Usage (Y), Usage (Y),
- * Logical Minimum (-32768), Logical Minimum (-32768),
- * Logical Maximum (32767), Logical Maximum (32767),
- * Input (Variable, Relative), Input (Variable, Relative),
- * End Collection, End Collection,
- */
- if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
- rdesc[13] = 8; /* Button/Variable Report Count */
- rdesc[21] = 8; /* Button/Variable Usage Maximum */
- rdesc[29] = 0; /* Button/Constant Report Count */
- }
+ mouse_button_fixup(hdev, rdesc, *rsize, 8);
break;
}
return rdesc;
@@ -77,6 +81,8 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0cd4f7216239..5eea6fe0d7bd 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
{
struct input_dev *input = hidinput->input;
+ /*
+	 * ELO devices have one Button usage in a GenDesk field, which makes
+	 * hid-input map it to BTN_LEFT; that confuses userspace, which then
+	 * considers the device to be a mouse/touchpad instead of a touchscreen.
+ */
+ clear_bit(BTN_LEFT, input->keybit);
set_bit(BTN_TOUCH, input->keybit);
set_bit(ABS_PRESSURE, input->absbit);
input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index e288a4a06fe8..3c0a1bf433d7 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -24,8 +24,71 @@
#include <linux/hid.h>
+static struct hid_driver hid_generic;
+
+static int __unmap_hid_generic(struct device *dev, void *data)
+{
+ struct hid_driver *hdrv = data;
+ struct hid_device *hdev = to_hid_device(dev);
+
+ /* only unbind matching devices already bound to hid-generic */
+ if (hdev->driver != &hid_generic ||
+ hid_match_device(hdev, hdrv) == NULL)
+ return 0;
+
+ if (dev->parent) /* Needed for USB */
+ device_lock(dev->parent);
+ device_release_driver(dev);
+ if (dev->parent)
+ device_unlock(dev->parent);
+
+ return 0;
+}
+
+static void hid_generic_add_driver(struct hid_driver *hdrv)
+{
+ bus_for_each_dev(&hid_bus_type, NULL, hdrv, __unmap_hid_generic);
+}
+
+static void hid_generic_removed_driver(struct hid_driver *hdrv)
+{
+ int ret;
+
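+	/* let hid-generic pick up any devices the removed driver had bound */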
+ ret = driver_attach(&hid_generic.driver);
+}
+
+static int __check_hid_generic(struct device_driver *drv, void *data)
+{
+ struct hid_driver *hdrv = to_hid_driver(drv);
+ struct hid_device *hdev = data;
+
+ if (hdrv == &hid_generic)
+ return 0;
+
+ return hid_match_device(hdev, hdrv) != NULL;
+}
+
+static bool hid_generic_match(struct hid_device *hdev,
+ bool ignore_special_driver)
+{
+ if (ignore_special_driver)
+ return true;
+
+ if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
+ return false;
+
+ /*
+ * If any other driver wants the device, leave the device to this other
+ * driver.
+ */
+ if (bus_for_each_drv(&hid_bus_type, NULL, hdev, __check_hid_generic))
+ return false;
+
+ return true;
+}
+
static const struct hid_device_id hid_table[] = {
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_GENERIC, HID_ANY_ID, HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, hid_table);
@@ -33,6 +96,9 @@ MODULE_DEVICE_TABLE(hid, hid_table);
static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
+ .match = hid_generic_match,
+ .bus_add_driver = hid_generic_add_driver,
+ .bus_removed_driver = hid_generic_removed_driver,
};
module_hid_driver(hid_generic);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5da3d6256d25..43ddcdfbd0da 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -178,7 +178,8 @@
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
-#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD 0x1807
#define USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD 0x8502
#define USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD 0x184a
#define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585
@@ -370,6 +371,8 @@
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRED 0x00fb
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRELESS 0x00fc
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
@@ -535,6 +538,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -1156,6 +1160,7 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
diff --git a/drivers/hid/hid-jabra.c b/drivers/hid/hid-jabra.c
new file mode 100644
index 000000000000..1f52daf14426
--- /dev/null
+++ b/drivers/hid/hid-jabra.c
@@ -0,0 +1,58 @@
+/*
+ * Jabra USB HID Driver
+ *
+ * Copyright (c) 2017 Niels Skou Olsen <nolsen@jabra.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define HID_UP_VENDOR_DEFINED_MIN 0xff000000
+#define HID_UP_VENDOR_DEFINED_MAX 0xffff0000
+
+static int jabra_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int is_vendor_defined =
+ ((usage->hid & HID_USAGE_PAGE) >= HID_UP_VENDOR_DEFINED_MIN &&
+ (usage->hid & HID_USAGE_PAGE) <= HID_UP_VENDOR_DEFINED_MAX);
+
+ dbg_hid("hid=0x%08x appl=0x%08x coll_idx=0x%02x usage_idx=0x%02x: %s\n",
+ usage->hid,
+ field->application,
+ usage->collection_index,
+ usage->usage_index,
+ is_vendor_defined ? "ignored" : "defaulted");
+
+ /* Ignore vendor defined usages, default map standard usages */
+ return is_vendor_defined ? -1 : 0;
+}
+
+static const struct hid_device_id jabra_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, jabra_devices);
+
+static struct hid_driver jabra_driver = {
+ .name = "jabra",
+ .id_table = jabra_devices,
+ .input_mapping = jabra_input_mapping,
+};
+module_hid_driver(jabra_driver);
+
+MODULE_AUTHOR("Niels Skou Olsen <nolsen@jabra.com>");
+MODULE_DESCRIPTION("Jabra USB HID Driver");
+MODULE_LICENSE("GPL");
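The vendor-page test in jabra_input_mapping() can be exercised on its own.
A minimal standalone sketch (HID_USAGE_PAGE is the 0xffff0000 page mask from
the HID core; everything else here is illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	#define HID_USAGE_PAGE			0xffff0000
	#define HID_UP_VENDOR_DEFINED_MIN	0xff000000
	#define HID_UP_VENDOR_DEFINED_MAX	0xffff0000

	/* true for any usage whose page falls in the vendor-defined range */
	static bool is_vendor_defined(uint32_t usage)
	{
		uint32_t page = usage & HID_USAGE_PAGE;

		return page >= HID_UP_VENDOR_DEFINED_MIN &&
		       page <= HID_UP_VENDOR_DEFINED_MAX;
	}

	/*
	 * is_vendor_defined(0xff300042) -> true  (vendor page, ignored)
	 * is_vendor_defined(0x000c00e9) -> false (Consumer page, mapped)
	 */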
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 65ea23be9677..3b4739bde05d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -85,11 +85,12 @@ MODULE_LICENSE("GPL");
#define MT_IO_FLAGS_PENDING_SLOTS 2
struct mt_slot {
- __s32 x, y, cx, cy, p, w, h;
+ __s32 x, y, cx, cy, p, w, h, a;
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
bool inrange_state; /* is the finger in proximity of the sensor? */
bool confidence_state; /* is the touch made by a finger? */
+ bool has_azimuth; /* the contact reports azimuth */
};
struct mt_class {
@@ -119,6 +120,10 @@ struct mt_device {
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
int cc_index; /* contact count field index in the report */
int cc_value_index; /* contact count value index in the field */
+ int scantime_index; /* scantime field index in the report */
+ int scantime_val_index; /* scantime value index in the field */
+ int prev_scantime; /* scantime reported in the previous packet */
+ int left_button_state; /* left button state */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
unsigned long initial_quirks; /* initial quirks state */
@@ -582,8 +587,15 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!(cls->quirks & MT_QUIRK_NO_AREA)) {
set_abs(hi->input, ABS_MT_TOUCH_MINOR, field,
cls->sn_height);
- input_set_abs_params(hi->input,
- ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ /*
+ * Only set ABS_MT_ORIENTATION if it is not
+ * already set by the HID_DG_AZIMUTH usage.
+ */
+ if (!test_bit(ABS_MT_ORIENTATION,
+ hi->input->absbit))
+ input_set_abs_params(hi->input,
+ ABS_MT_ORIENTATION, 0, 1, 0, 0);
}
mt_store_field(usage, td, hi);
return 1;
@@ -599,6 +611,12 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
EV_MSC, MSC_TIMESTAMP);
input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
mt_store_field(usage, td, hi);
+ /* Ignore if indexes are out of bounds. */
+ if (field->index >= field->report->maxfield ||
+ usage->usage_index >= field->report_count)
+ return 1;
+ td->scantime_index = field->index;
+ td->scantime_val_index = usage->usage_index;
return 1;
case HID_DG_CONTACTCOUNT:
/* Ignore if indexes are out of bounds. */
@@ -608,6 +626,21 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->cc_index = field->index;
td->cc_value_index = usage->usage_index;
return 1;
+ case HID_DG_AZIMUTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_ORIENTATION);
+ /*
+		 * Azimuth has the range [0, MAX), representing a full
+		 * revolution. Set ABS_MT_ORIENTATION to a quarter of
+		 * MAX, per the definition of ABS_MT_ORIENTATION.
+ */
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ -field->logical_maximum / 4,
+ field->logical_maximum / 4,
+ cls->sn_move ?
+ field->logical_maximum / cls->sn_move : 0, 0);
+ mt_store_field(usage, td, hi);
+ return 1;
case HID_DG_CONTACTMAX:
/* we don't set td->last_slot_field as contactcount and
* contact max are global to the report */
@@ -700,6 +733,10 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
int wide = (s->w > s->h);
int major = max(s->w, s->h);
int minor = min(s->w, s->h);
+ int orientation = wide;
+
+ if (s->has_azimuth)
+ orientation = s->a;
/*
* divided by two to match visual scale of touch
@@ -716,7 +753,8 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
input_event(input, EV_ABS, ABS_MT_DISTANCE,
!s->touch_state);
- input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION,
+ orientation);
input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
@@ -734,10 +772,16 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
{
+ __s32 cls = td->mtclass.name;
+
+ if (cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL)
+ input_event(input, EV_KEY, BTN_LEFT, td->left_button_state);
+
input_mt_sync_frame(input);
input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
input_sync(input);
td->num_received = 0;
+ td->left_button_state = 0;
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
else
@@ -778,9 +822,11 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
}
static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
+ struct hid_usage *usage, __s32 value,
+ bool first_packet)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
__s32 quirks = td->mtclass.quirks;
struct input_dev *input = field->hidinput->input;
@@ -832,11 +878,49 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT:
break;
+ case HID_DG_AZIMUTH:
+ /*
+		 * Azimuth is counter-clockwise and ranges over [0, MAX)
+		 * (a full revolution). Convert it to a clockwise value in
+		 * [-MAX/2, MAX/2].
+		 *
+		 * Note that ABS_MT_ORIENTATION requires us to report
+		 * the limit of [-MAX/4, MAX/4], but the value can go
+		 * out of range to [-MAX/2, MAX/2] to report an upside
+		 * down ellipse.
+ */
+ if (value > field->logical_maximum / 2)
+ value -= field->logical_maximum;
+ td->curdata.a = -value;
+ td->curdata.has_azimuth = true;
+ break;
case HID_DG_TOUCH:
/* do nothing */
break;
default:
+ /*
+			 * For Win8 PTP touchpads we should only look at
+			 * non-finger/touch events in the first packet of
+			 * a (possible) multi-packet frame.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ !first_packet)
+ return;
+
+ /*
+ * For Win8 PTP touchpads we map both the clickpad click
+			 * and any "external" left buttons to BTN_LEFT. If a
+			 * device claims to have both, we need to report 1 for
+			 * BTN_LEFT if either is pressed, so we OR all values
+			 * together and report the result in mt_sync_frame().
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ usage->type == EV_KEY && usage->code == BTN_LEFT) {
+ td->left_button_state |= value;
+ return;
+ }
+
if (usage->type)
input_event(input, usage->type, usage->code,
value);
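
For illustration, the conversion above as a standalone helper (a sketch;
the function name is made up):

	/*
	 * Map a counter-clockwise azimuth in [0, max) to the clockwise
	 * ABS_MT_ORIENTATION range [-max/2, max/2].
	 */
	static int azimuth_to_orientation(int value, int max)
	{
		if (value > max / 2)
			value -= max;	/* wrap [max/2, max) to [-max/2, 0) */
		return -value;		/* flip counter-clockwise to clockwise */
	}

For example, with max = 360 an azimuth of 270 becomes -90 after the wrap
and +90 after the flip.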
@@ -855,9 +939,11 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
struct hid_field *field;
+ bool first_packet;
unsigned count;
- int r, n;
+ int r, n, scantime = 0;
/* sticky fingers release in progress, abort */
if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
@@ -867,13 +953,31 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* Includes multi-packet support where subsequent
* packets are sent with zero contactcount.
*/
+ if (td->scantime_index >= 0) {
+ field = report->field[td->scantime_index];
+ scantime = field->value[td->scantime_val_index];
+ }
if (td->cc_index >= 0) {
struct hid_field *field = report->field[td->cc_index];
int value = field->value[td->cc_value_index];
- if (value)
+
+ /*
+ * For Win8 PTPs the first packet (td->num_received == 0) may
+		 * have a contactcount of 0 if there is only a button event.
+		 * We double-check that this is not a continuation packet
+		 * of a possible multi-packet frame by checking that the
+		 * timestamp has changed.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ td->num_received == 0 && td->prev_scantime != scantime)
+ td->num_expected = value;
+		/* A non-zero contact count always indicates a first packet */
+ else if (value)
td->num_expected = value;
}
+ td->prev_scantime = scantime;
+ first_packet = td->num_received == 0;
for (r = 0; r < report->maxfield; r++) {
field = report->field[r];
count = field->report_count;
@@ -883,7 +987,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
for (n = 0; n < count; n++)
mt_process_mt_event(hid, field, &field->usage[n],
- field->value[n]);
+ field->value[n], first_packet);
}
if (td->num_received >= td->num_expected)
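
The bookkeeping this hunk adds reduces to a single predicate; a sketch,
assuming the Win8 class check is precomputed as a boolean:

	/*
	 * Should td->num_expected be refreshed from this report's contact
	 * count? A non-zero count always opens a new frame; Win8 PTPs may
	 * also open a frame with a zero count (button-only report), which a
	 * changed scantime distinguishes from a continuation packet.
	 */
	static bool refresh_num_expected(bool is_win8, int num_received,
					 int contact_count, int scantime,
					 int prev_scantime)
	{
		if (contact_count)
			return true;
		return is_win8 && num_received == 0 &&
		       scantime != prev_scantime;
	}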
@@ -1329,6 +1433,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->maxcontact_report_id = -1;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
td->cc_index = -1;
+ td->scantime_index = -1;
td->mt_report_id = -1;
hid_set_drvdata(hdev, td);
@@ -1649,14 +1754,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
- /* Panasonic panels */
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT780) },
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT880) },
-
/* Novatek Panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
@@ -1667,6 +1764,14 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_NTRIG, 0x1b05) },
+ /* Panasonic panels */
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT780) },
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT880) },
+
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
new file mode 100644
index 000000000000..5f6035a5ce36
--- /dev/null
+++ b/drivers/hid/hid-quirks.c
@@ -0,0 +1,1276 @@
+/*
+ * HID quirks support for Linux
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2006-2007 Jiri Kosina
+ * Copyright (c) 2007 Paul Walmsley
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "hid-ids.h"
+
+/*
+ * Alphabetically sorted by vendor then product.
+ */
+
+static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750), HID_QUIRK_NO_INIT_REPORTS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK), HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL), HID_QUIRK_HIDINPUT_FORCE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+
+ { 0 }
+};
+
+/*
+ * A list of devices for which there is a specialized driver on the HID bus.
+ *
+ * Please note that for multitouch devices (driven by the hid-multitouch
+ * driver), proper autodetection and autoloading are in place (based on the
+ * presence of HID_DG_CONTACTID), so those devices don't need to be added to
+ * this list, as we are doing the right thing in hid_scan_usage().
+ *
+ * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
+ * physical is found inside a usage page of type sensor, hid-sensor-hub will be
+ * used as a driver. See hid_scan_report().
+ */
+static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
+ { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELECOM)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+ { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+ { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ITE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_JABRA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LENOVO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MICROSOFT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MONTEREY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WIIMOTE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTRIG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ORTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PENMOUNT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PETALYNX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PICOLCD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRIMAX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RETRODE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RMI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ROCCAT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAITEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAMSUNG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SONY)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_STEELSERIES)
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SUNPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+#endif
+#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TIVO)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TOPSEED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TWINHAN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UCLOGIC)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WALTOP)
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
+#endif
+#if IS_ENABLED(CONFIG_HID_XINMO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZYDACRON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+#endif
+ { }
+};
+
+/* a list of devices that shouldn't be handled by HID core at all */
+static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
+ { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
+#endif
+ { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { }
+};
+
+/**
+ * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
+ *
+ * There are composite devices for which we want to ignore only a certain
+ * interface. This is a list of devices for which only the mouse interface will
+ * be ignored. This allows a dedicated driver to take care of the interface.
+ */
+static const struct hid_device_id hid_mouse_ignore_list[] = {
+ /* appletouch driver */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+};
+
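+/*
+ * hid_ignore() checks, in order: the per-device NO_IGNORE/IGNORE quirk
+ * flags, the vendor-specific name and product-range checks below, the
+ * mouse-only ignore table (for USB mouse interfaces), and finally the
+ * full hid_ignore_list.
+ */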
+bool hid_ignore(struct hid_device *hdev)
+{
+ if (hdev->quirks & HID_QUIRK_NO_IGNORE)
+ return false;
+ if (hdev->quirks & HID_QUIRK_IGNORE)
+ return true;
+
+ switch (hdev->vendor) {
+ case USB_VENDOR_ID_CODEMERCS:
+ /* ignore all Code Mercenaries IOWarrior devices */
+ if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
+ hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_LOGITECH:
+ if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
+ hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
+ return true;
+ /*
+ * The Keene FM transmitter USB device has the same USB ID as
+ * the Logitech AudioHub Speaker, but it should be ignored by
+ * the HID layer. Check whether the name is that of the Keene
+ * device. For reference, the name of the AudioHub is
+ * "HOLTEK AudioHub Speaker".
+ */
+ if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
+ !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
+ return true;
+ break;
+ case USB_VENDOR_ID_SOUNDGRAPH:
+ if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
+ hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_HANWANG:
+ if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
+ hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_JESS:
+ if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
+ hdev->type == HID_TYPE_USBNONE)
+ return true;
+ break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
+ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
+ return true;
+ break;
+ case USB_VENDOR_ID_ATMEL_V_USB:
+ /* The Masterkit MA901 USB radio is based on the Atmel tiny85
+ * chip and has the same USB ID as many Atmel V-USB devices.
+ * The radio is handled by the radio-ma901.c driver, so the HID
+ * layer should ignore it. Check the name, bus and product, and
+ * ignore the device if it is the MA901 radio.
+ */
+ if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+ hdev->bus == BUS_USB &&
+ strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+ return true;
+ break;
+ case USB_VENDOR_ID_ELAN:
+ /*
+ * Many Elan devices have a product id of 0x0401 and are handled
+ * by the elan_i2c input driver, but the ACPI HID ELAN0800 device
+ * is not (and cannot be) handled by that driver. Ignore all
+ * 0x0401 devices except for the ELAN0800 one.
+ */
+ if (hdev->product == 0x0401 &&
+ strncmp(hdev->name, "ELAN0800", 8) != 0)
+ return true;
+ break;
+ }
+
+ if (hdev->type == HID_TYPE_USBMOUSE &&
+ hid_match_id(hdev, hid_mouse_ignore_list))
+ return true;
+
+ return !!hid_match_id(hdev, hid_ignore_list);
+}
+EXPORT_SYMBOL_GPL(hid_ignore);
+
+/* Dynamic HID quirks list - specified at runtime */
+struct quirks_list_struct {
+ struct hid_device_id hid_bl_item;
+ struct list_head node;
+};
+
+static LIST_HEAD(dquirks_list);
+static DEFINE_MUTEX(dquirks_lock);
+
+/* Runtime ("dynamic") quirks manipulation functions */
+
+/**
+ * hid_exists_dquirk: find any dynamic quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Scans dquirks_list for a matching dynamic quirk and returns
+ * the pointer to the relevant struct hid_device_id if found.
+ * Must be called with dquirks_lock held.
+ *
+ * Returns: NULL if no quirk found, struct hid_device_id * if found.
+ */
+static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
+{
+ struct quirks_list_struct *q;
+ struct hid_device_id *bl_entry = NULL;
+
+ list_for_each_entry(q, &dquirks_list, node) {
+ if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+ bl_entry = &q->hid_bl_item;
+ break;
+ }
+ }
+
+ if (bl_entry != NULL)
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ bl_entry->driver_data, bl_entry->vendor,
+ bl_entry->product);
+
+ return bl_entry;
+}
+
+
+/**
+ * hid_modify_dquirk: add/replace a HID quirk
+ * @id: the HID device to match
+ * @quirks: the unsigned long quirks value to add/replace
+ *
+ * Description:
+ * If a dynamic quirk already exists for this device, replace its
+ * quirks value with what was provided. Otherwise, add the quirk
+ * to the dynamic quirks list.
+ *
+ * Returns: 0 OK, -error on failure.
+ */
+static int hid_modify_dquirk(const struct hid_device_id *id,
+ const unsigned long quirks)
+{
+ struct hid_device *hdev;
+ struct quirks_list_struct *q_new, *q;
+ int list_edited = 0;
+ int ret = 0;
+
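+ /*
+ * Allocate a throwaway hid_device carrying only the id fields so
+ * that hid_match_one_id() can be reused below to find an existing
+ * dynamic quirk entry for the same device.
+ */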
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
+ if (!q_new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdev->bus = q_new->hid_bl_item.bus = id->bus;
+ hdev->group = q_new->hid_bl_item.group = id->group;
+ hdev->vendor = q_new->hid_bl_item.vendor = id->vendor;
+ hdev->product = q_new->hid_bl_item.product = id->product;
+ q_new->hid_bl_item.driver_data = quirks;
+
+ mutex_lock(&dquirks_lock);
+
+ list_for_each_entry(q, &dquirks_list, node) {
+
+ if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+
+ list_replace(&q->node, &q_new->node);
+ kfree(q);
+ list_edited = 1;
+ break;
+
+ }
+
+ }
+
+ if (!list_edited)
+ list_add_tail(&q_new->node, &dquirks_list);
+
+ mutex_unlock(&dquirks_lock);
+
+ out:
+ kfree(hdev);
+ return ret;
+}
+
+/**
+ * hid_remove_all_dquirks: remove all runtime HID quirks from memory
+ * @bus: bus to match against. Use HID_BUS_ANY if all need to be removed.
+ *
+ * Description:
+ * Free all memory associated with dynamic quirks - called before
+ * module unload.
+ *
+ */
+static void hid_remove_all_dquirks(__u16 bus)
+{
+ struct quirks_list_struct *q, *temp;
+
+ mutex_lock(&dquirks_lock);
+ list_for_each_entry_safe(q, temp, &dquirks_list, node) {
+ if (bus == HID_BUS_ANY || bus == q->hid_bl_item.bus) {
+ list_del(&q->node);
+ kfree(q);
+ }
+ }
+ mutex_unlock(&dquirks_lock);
+}
+
+/**
+ * hid_quirks_init: apply HID quirks specified at module load time
+ * @quirks_param: array of "0xVVVV:0xPPPP:0xQQQQ" (vendor:product:quirks) strings
+ * @bus: the bus to assign to the parsed quirk entries
+ * @count: number of elements in @quirks_param
+ *
+ * Returns: 0. Entries that cannot be parsed are reported and skipped.
+ */
+int hid_quirks_init(char **quirks_param, __u16 bus, int count)
+{
+ struct hid_device_id id = { 0 };
+ int n = 0, m;
+ unsigned short int vendor, product;
+ u32 quirks;
+
+ id.bus = bus;
+
+ for (; n < count && quirks_param[n]; n++) {
+
+ m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
+ &vendor, &product, &quirks);
+
+ id.vendor = (__u16)vendor;
+ id.product = (__u16)product;
+
+ if (m != 3 ||
+ hid_modify_dquirk(&id, quirks) != 0) {
+ pr_warn("Could not parse HID quirk module param %s\n",
+ quirks_param[n]);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_quirks_init);
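+
+/*
+ * Usage sketch (hypothetical values, for illustration only): each
+ * quirks_param entry must match the "0x%hx:0x%hx:0x%x" format parsed
+ * above, i.e. vendor:product:quirks:
+ *
+ *	static char *params[] = { "0x046d:0xc30a:0x00000004", NULL };
+ *	hid_quirks_init(params, BUS_USB, 2);
+ *
+ * As the usbhid changes below show, USB HID feeds this from its
+ * boot-time quirks module parameters.
+ */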
+
+/**
+ * hid_quirks_exit: release memory associated with dynamic quirks
+ * @bus: a bus to match against
+ *
+ * Description:
+ * Release all memory associated with dynamic quirks for a given bus.
+ * Called upon module unload.
+ * Use HID_BUS_ANY to remove all dynamic quirks.
+ *
+ * Returns: nothing
+ */
+void hid_quirks_exit(__u16 bus)
+{
+ hid_remove_all_dquirks(bus);
+}
+EXPORT_SYMBOL_GPL(hid_quirks_exit);
+
+/**
+ * hid_gets_squirk: return any static quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Given a HID device, build its quirks value from the static ignore,
+ * special-driver and quirks tables.
+ *
+ * Returns: the quirks.
+ */
+static unsigned long hid_gets_squirk(const struct hid_device *hdev)
+{
+ const struct hid_device_id *bl_entry;
+ unsigned long quirks = 0;
+
+ if (hid_match_id(hdev, hid_ignore_list))
+ quirks |= HID_QUIRK_IGNORE;
+
+ if (hid_match_id(hdev, hid_have_special_driver))
+ quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
+
+ bl_entry = hid_match_id(hdev, hid_quirks);
+ if (bl_entry != NULL)
+ quirks |= bl_entry->driver_data;
+
+ if (quirks)
+ dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ quirks, hdev->vendor, hdev->product);
+ return quirks;
+}
+
+/**
+ * hid_lookup_quirk: return any quirks associated with a HID device
+ * @hdev: the HID device to look for
+ *
+ * Description:
+ * Given a HID device, return any quirks associated with that device.
+ *
+ * Returns: an unsigned long quirks value.
+ */
+unsigned long hid_lookup_quirk(const struct hid_device *hdev)
+{
+ unsigned long quirks = 0;
+ const struct hid_device_id *quirk_entry = NULL;
+
+ /* NCR devices must not be queried for reports */
+ if (hdev->bus == BUS_USB &&
+ hdev->vendor == USB_VENDOR_ID_NCR &&
+ hdev->product >= USB_DEVICE_ID_NCR_FIRST &&
+ hdev->product <= USB_DEVICE_ID_NCR_LAST)
+ return HID_QUIRK_NO_INIT_REPORTS;
+
+ /* These devices must be ignored if version (bcdDevice) is too old */
+ if (hdev->bus == BUS_USB && hdev->vendor == USB_VENDOR_ID_JABRA) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_JABRA_SPEAK_410:
+ if (hdev->version < 0x0111)
+ return HID_QUIRK_IGNORE;
+ break;
+ case USB_DEVICE_ID_JABRA_SPEAK_510:
+ if (hdev->version < 0x0214)
+ return HID_QUIRK_IGNORE;
+ break;
+ }
+ }
+
+ mutex_lock(&dquirks_lock);
+ quirk_entry = hid_exists_dquirk(hdev);
+ if (quirk_entry)
+ quirks = quirk_entry->driver_data;
+ else
+ quirks = hid_gets_squirk(hdev);
+ mutex_unlock(&dquirks_lock);
+
+ return quirks;
+}
+EXPORT_SYMBOL_GPL(hid_lookup_quirk);
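+
+/*
+ * Minimal usage sketch for transport drivers (cf. the usbhid changes
+ * below): fill in bus/vendor/product/version, then consult the tables:
+ *
+ *	quirks = hid_lookup_quirk(hid);
+ *	if (quirks & HID_QUIRK_IGNORE)
+ *		return -ENODEV;
+ */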
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 0f43c4292685..c6c05df3e8d2 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -731,6 +731,7 @@ static const struct hid_device_id rmi_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 43617fb28b87..317c9c2c0a7c 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
uint new_profile_index)
{
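+ /* Ignore out-of-range profile indices; profile_settings has a fixed size. */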
+ if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+ return;
kovaplus->actual_profile = new_profile_index;
kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index fb77dec720a4..b7e86aba6f33 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -137,7 +137,7 @@ exit_unlock:
return retval;
}
-static unsigned int roccat_poll(struct file *file, poll_table *wait)
+static __poll_t roccat_poll(struct file *file, poll_table *wait)
{
struct roccat_reader *reader = file->private_data;
poll_wait(file, &reader->device->wait, wait);
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 0bcf041368c7..21ed6c55c40a 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -702,11 +702,11 @@ static int hid_sensor_custom_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
-static unsigned int hid_sensor_custom_poll(struct file *file,
+static __poll_t hid_sensor_custom_poll(struct file *file,
struct poll_table_struct *wait)
{
struct hid_sensor_custom *sensor_inst;
- unsigned int mask = 0;
+ __poll_t mask = 0;
sensor_inst = container_of(file->private_data,
struct hid_sensor_custom, custom_dev);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b9dc3ac4d4aa..ccdc5f2d01b1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -473,6 +473,7 @@ struct motion_output_report_02 {
#define DS4_FEATURE_REPORT_0x02_SIZE 37
#define DS4_FEATURE_REPORT_0x05_SIZE 41
#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0xA3_SIZE 49
#define DS4_INPUT_REPORT_0x11_SIZE 78
#define DS4_OUTPUT_REPORT_0x05_SIZE 32
#define DS4_OUTPUT_REPORT_0x11_SIZE 78
@@ -544,6 +545,8 @@ struct sony_sc {
struct power_supply *battery;
struct power_supply_desc battery_desc;
int device_id;
+ unsigned fw_version;
+ unsigned hw_version;
u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
@@ -627,6 +630,29 @@ static ssize_t ds4_store_poll_interval(struct device *dev,
static DEVICE_ATTR(bt_poll_interval, 0644, ds4_show_poll_interval,
ds4_store_poll_interval);
+static ssize_t sony_show_firmware_version(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->fw_version);
+}
+
+static DEVICE_ATTR(firmware_version, 0444, sony_show_firmware_version, NULL);
+
+static ssize_t sony_show_hardware_version(struct device *dev,
+ struct device_attribute
+ *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->hw_version);
+}
+
+static DEVICE_ATTR(hardware_version, 0444, sony_show_hardware_version, NULL);
static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
@@ -1646,6 +1672,31 @@ static void dualshock4_calibration_work(struct work_struct *work)
spin_unlock_irqrestore(&sc->lock, flags);
}
+static int dualshock4_get_version_info(struct sony_sc *sc)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(DS4_FEATURE_REPORT_0xA3_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(sc->hdev, 0xA3, buf,
+ DS4_FEATURE_REPORT_0xA3_SIZE,
+ HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ kfree(buf);
+ return ret;
+ }
+
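+ /* The hardware and firmware versions sit at fixed offsets in report 0xA3. */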
+ sc->hw_version = get_unaligned_le16(&buf[35]);
+ sc->fw_version = get_unaligned_le16(&buf[41]);
+
+ kfree(buf);
+ return 0;
+}
+
static void sixaxis_set_leds_from_id(struct sony_sc *sc)
{
static const u8 sixaxis_leds[10][4] = {
@@ -2399,10 +2450,7 @@ static int sony_check_add(struct sony_sc *sc)
memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2432,10 +2480,7 @@ static int sony_check_add(struct sony_sc *sc)
sc->mac_address[5-n] = buf[4+n];
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else {
return 0;
}
@@ -2619,6 +2664,28 @@ static int sony_input_configured(struct hid_device *hdev,
goto err_stop;
}
+ ret = dualshock4_get_version_info(sc);
+ if (ret < 0) {
+ hid_err(sc->hdev, "Failed to get version data from Dualshock 4\n");
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (ret) {
+ /* Zero the versions so the error paths skip removing the sysfs files. */
+ sc->fw_version = 0;
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs firmware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_hardware_version);
+ if (ret) {
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs hardware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
/*
* The Dualshock 4 touchpad supports 2 touches and has a
* resolution of 1920x942 (44.86 dots/mm).
@@ -2695,6 +2762,10 @@ err_stop:
*/
if (sc->ds4_bt_poll_interval)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
if (sc->quirks & SONY_BATTERY_SUPPORT)
@@ -2796,6 +2867,12 @@ static void sony_remove(struct hid_device *hdev)
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
+
sony_cancel_work_sync(sc);
kfree(sc->output_report_dmabuf);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 5fbe0f81ab2e..be210219f982 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -249,7 +249,7 @@ out:
return ret;
}
-static unsigned int hidraw_poll(struct file *file, poll_table *wait)
+static __poll_t hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index e054ee43c1e2..7230243b94d3 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -934,11 +934,6 @@ static int i2c_hid_of_probe(struct i2c_client *client,
}
pdata->hid_descriptor_address = val;
- ret = of_property_read_u32(dev->of_node, "post-power-on-delay-ms",
- &val);
- if (!ret)
- pdata->post_power_delay_ms = val;
-
return 0;
}
@@ -955,6 +950,16 @@ static inline int i2c_hid_of_probe(struct i2c_client *client,
}
#endif
+static void i2c_hid_fwnode_probe(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ u32 val;
+
+ if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
+ &val))
+ pdata->post_power_delay_ms = val;
+}
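+
+/*
+ * Example (assumed) devicetree fragment consumed above; the property
+ * name matches the device_property_read_u32() lookup:
+ *
+ *	post-power-on-delay-ms = <100>;
+ */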
+
static int i2c_hid_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -998,6 +1003,9 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
+ /* Parse platform-agnostic common properties from ACPI / device tree */
+ i2c_hid_fwnode_probe(client, &ihid->pdata);
+
ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(ihid->pdata.supply)) {
ret = PTR_ERR(ihid->pdata.supply);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 2aac097c3f70..97869b7410eb 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -28,6 +28,7 @@
#define SPT_Ax_DEVICE_ID 0x9D35
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
+#define CNL_H_DEVICE_ID 0xA37C
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 20d824f74f99..582e449be9fe 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -37,6 +37,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 6f819f144cb4..fc43850a155e 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -753,7 +753,7 @@ unlock:
return ret ? ret : count;
}
-static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 0ff227d0c033..b6349e30bd93 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -3,7 +3,7 @@
# Makefile for the USB input drivers
#
-usbhid-y := hid-core.o hid-quirks.o
+usbhid-y := hid-core.o
usbhid-$(CONFIG_USB_HIDDEV) += hiddev.o
usbhid-$(CONFIG_HID_PID) += hid-pidff.o
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 640dfb937c69..77c50cdfff97 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -978,8 +978,7 @@ static int usbhid_parse(struct hid_device *hid)
int num_descriptors;
size_t offset = offsetof(struct hid_descriptor, desc);
- quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
+ quirks = hid_lookup_quirk(hid);
if (quirks & HID_QUIRK_IGNORE)
return -ENODEV;
@@ -1328,8 +1327,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->bus = BUS_USB;
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
+ hid->version = le16_to_cpu(dev->descriptor.bcdDevice);
hid->name[0] = 0;
- hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1641,7 +1640,7 @@ static int __init hid_init(void)
{
int retval = -ENOMEM;
- retval = usbhid_quirks_init(quirks_param);
+ retval = hid_quirks_init(quirks_param, BUS_USB, MAX_USBHID_BOOT_QUIRKS);
if (retval)
goto usbhid_quirks_init_fail;
retval = usb_register(&hid_driver);
@@ -1651,7 +1650,7 @@ static int __init hid_init(void)
return 0;
usb_register_fail:
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
usbhid_quirks_init_fail:
return retval;
}
@@ -1659,7 +1658,7 @@ usbhid_quirks_init_fail:
static void __exit hid_exit(void)
{
usb_deregister(&hid_driver);
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
}
module_init(hid_init);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
deleted file mode 100644
index 331f7f34ec14..000000000000
--- a/drivers/hid/usbhid/hid-quirks.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * USB HID quirks support for Linux
- *
- * Copyright (c) 1999 Andreas Gal
- * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006-2007 Jiri Kosina
- * Copyright (c) 2007 Paul Walmsley
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/hid.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-#include "../hid-ids.h"
-
-/*
- * Alphabetically sorted blacklist by quirk type.
- */
-
-static const struct hid_blacklist {
- __u16 idVendor;
- __u16 idProduct;
- __u32 quirks;
-} hid_blacklist[] = {
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN, HID_QUIRK_MULTI_INPUT},
- { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
-
- { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
-
- { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
- { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
-
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK, HID_QUIRK_MULTI_INPUT },
-
- { 0, 0 }
-};
-
-/* Dynamic HID quirks list - specified at runtime */
-struct quirks_list_struct {
- struct hid_blacklist hid_bl_item;
- struct list_head node;
-};
-
-static LIST_HEAD(dquirks_list);
-static DECLARE_RWSEM(dquirks_rwsem);
-
-/* Runtime ("dynamic") quirks manipulation functions */
-
-/**
- * usbhid_exists_dquirk: find any dynamic quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Scans dquirks_list for a matching dynamic quirk and returns
- * the pointer to the relevant struct hid_blacklist if found.
- * Must be called with a read lock held on dquirks_rwsem.
- *
- * Returns: NULL if no quirk found, struct hid_blacklist * if found.
- */
-static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor,
- const u16 idProduct)
-{
- struct quirks_list_struct *q;
- struct hid_blacklist *bl_entry = NULL;
-
- list_for_each_entry(q, &dquirks_list, node) {
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
- bl_entry = &q->hid_bl_item;
- break;
- }
- }
-
- if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
-
- return bl_entry;
-}
-
-
-/**
- * usbhid_modify_dquirk: add/replace a HID quirk
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- * @quirks: the u32 quirks value to add/replace
- *
- * Description:
- * If a dynamic quirk exists in memory for this (idVendor,
- * idProduct) pair, replace its quirks value with what was
- * provided. Otherwise, add the quirk to the dynamic quirks list.
- *
- * Returns: 0 OK, -error on failure.
- */
-static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
- const u32 quirks)
-{
- struct quirks_list_struct *q_new, *q;
- int list_edited = 0;
-
- if (!idVendor) {
- dbg_hid("Cannot add a quirk with idVendor = 0\n");
- return -EINVAL;
- }
-
- q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
- if (!q_new)
- return -ENOMEM;
-
- q_new->hid_bl_item.idVendor = idVendor;
- q_new->hid_bl_item.idProduct = idProduct;
- q_new->hid_bl_item.quirks = quirks;
-
- down_write(&dquirks_rwsem);
-
- list_for_each_entry(q, &dquirks_list, node) {
-
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
-
- list_replace(&q->node, &q_new->node);
- kfree(q);
- list_edited = 1;
- break;
-
- }
-
- }
-
- if (!list_edited)
- list_add_tail(&q_new->node, &dquirks_list);
-
- up_write(&dquirks_rwsem);
-
- return 0;
-}
-
-/**
- * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory
- *
- * Description:
- * Free all memory associated with dynamic quirks - called before
- * module unload.
- *
- */
-static void usbhid_remove_all_dquirks(void)
-{
- struct quirks_list_struct *q, *temp;
-
- down_write(&dquirks_rwsem);
- list_for_each_entry_safe(q, temp, &dquirks_list, node) {
- list_del(&q->node);
- kfree(q);
- }
- up_write(&dquirks_rwsem);
-
-}
-
-/**
- * usbhid_quirks_init: apply USB HID quirks specified at module load time
- */
-int usbhid_quirks_init(char **quirks_param)
-{
- u16 idVendor, idProduct;
- u32 quirks;
- int n = 0, m;
-
- for (; n < MAX_USBHID_BOOT_QUIRKS && quirks_param[n]; n++) {
-
- m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
- &idVendor, &idProduct, &quirks);
-
- if (m != 3 ||
- usbhid_modify_dquirk(idVendor, idProduct, quirks) != 0) {
- pr_warn("Could not parse HID quirk module param %s\n",
- quirks_param[n]);
- }
- }
-
- return 0;
-}
-
-/**
- * usbhid_quirks_exit: release memory associated with dynamic_quirks
- *
- * Description:
- * Release all memory associated with dynamic quirks. Called upon
- * module unload.
- *
- * Returns: nothing
- */
-void usbhid_quirks_exit(void)
-{
- usbhid_remove_all_dquirks();
-}
-
-/**
- * usbhid_exists_squirk: return any static quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return a pointer to
- * the hid_blacklist entry associated with that device.
- *
- * Returns: pointer if quirk found, or NULL if no quirks found.
- */
-static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
- const u16 idProduct)
-{
- const struct hid_blacklist *bl_entry = NULL;
- int n = 0;
-
- for (; hid_blacklist[n].idVendor; n++)
- if (hid_blacklist[n].idVendor == idVendor &&
- (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
- hid_blacklist[n].idProduct == idProduct))
- bl_entry = &hid_blacklist[n];
-
- if (bl_entry != NULL)
- dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
- return bl_entry;
-}
-
-/**
- * usbhid_lookup_quirk: return any quirks associated with a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return any quirks associated
- * with that device.
- *
- * Returns: a u32 quirks value.
- */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
-{
- u32 quirks = 0;
- const struct hid_blacklist *bl_entry = NULL;
-
- /* NCR devices must not be queried for reports */
- if (idVendor == USB_VENDOR_ID_NCR &&
- idProduct >= USB_DEVICE_ID_NCR_FIRST &&
- idProduct <= USB_DEVICE_ID_NCR_LAST)
- return HID_QUIRK_NO_INIT_REPORTS;
-
- down_read(&dquirks_rwsem);
- bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
- if (!bl_entry)
- bl_entry = usbhid_exists_squirk(idVendor, idProduct);
- if (bl_entry)
- quirks = bl_entry->quirks;
- up_read(&dquirks_rwsem);
-
- return quirks;
-}
-
-EXPORT_SYMBOL_GPL(usbhid_lookup_quirk);
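
The static table and dynamic-quirk list deleted here are not dropped; this series moves them into the bus-agnostic drivers/hid/hid-quirks.c, keyed by bus type. Judging from the call sites patched in hid-core.c above, a transport driver now uses the shared API roughly like this (module and parameter names are placeholders, not from the tree):

    #include <linux/hid.h>

    #define EXAMPLE_MAX_BOOT_QUIRKS 4

    /* "0xVVVV:0xPPPP:0xQQQQQQQQ" strings from a module parameter. */
    static char *quirks_param[EXAMPLE_MAX_BOOT_QUIRKS];

    static int __init example_init(void)
    {
            return hid_quirks_init(quirks_param, BUS_USB,
                                   EXAMPLE_MAX_BOOT_QUIRKS);
    }

    static void __exit example_exit(void)
    {
            hid_quirks_exit(BUS_USB);   /* free dynamic quirks for this bus */
    }
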
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 7d749b19c27c..0ff3e7e70c8d 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -422,7 +422,7 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
* "poll" file op
* No kernel lock - fine
*/
-static unsigned int hiddev_poll(struct file *file, poll_table *wait)
+static __poll_t hiddev_poll(struct file *file, poll_table *wait)
{
struct hiddev_list *list = file->private_data;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index ee71ad9b6cc1..409543160af7 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -56,6 +56,107 @@ static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf,
return retval;
}
+static void wacom_wac_queue_insert(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo,
+ u8 *raw_data, int size)
+{
+ bool warned = false;
+
+ while (kfifo_avail(fifo) < size) {
+ if (!warned)
+ hid_warn(hdev, "%s: kfifo has filled, starting to drop events\n", __func__);
+ warned = true;
+
+ kfifo_skip(fifo);
+ }
+
+ kfifo_in(fifo, raw_data, size);
+}
+
+static void wacom_wac_queue_flush(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo)
+{
+ while (!kfifo_is_empty(fifo)) {
+ u8 buf[WACOM_PKGLEN_MAX];
+ int size;
+ int err;
+
+ size = kfifo_out(fifo, buf, sizeof(buf));
+ err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
+ if (err) {
+ hid_warn(hdev, "%s: unable to flush event due to error %d\n",
+ __func__, err);
+ }
+ }
+}
+
+static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ bool flush = false;
+ bool insert = false;
+ int i, j;
+
+ if (wacom_wac->serial[0] || !(features->quirks & WACOM_QUIRK_TOOLSERIAL))
+ return 0;
+
+ /* Queue events which have invalid tool type or serial number */
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ struct hid_field *field = report->field[i];
+ struct hid_usage *usage = &field->usage[j];
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ unsigned int offset;
+ unsigned int size;
+ unsigned int value;
+
+ if (equivalent_usage != HID_DG_INRANGE &&
+ equivalent_usage != HID_DG_TOOLSERIALNUMBER &&
+ equivalent_usage != WACOM_HID_WD_SERIALHI &&
+ equivalent_usage != WACOM_HID_WD_TOOLTYPE)
+ continue;
+
+ offset = field->report_offset;
+ size = field->report_size;
+ value = hid_field_extract(hdev, raw_data+1, offset + j * size, size);
+
+ /* If we go out of range, we need to flush the queue ASAP */
+ if (equivalent_usage == HID_DG_INRANGE)
+ value = !value;
+
+ if (value) {
+ flush = true;
+ switch (equivalent_usage) {
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = value;
+ break;
+
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ break;
+
+ case WACOM_HID_WD_TOOLTYPE:
+ wacom_wac->id[0] = value;
+ break;
+ }
+ }
+ else {
+ insert = true;
+ }
+ }
+ }
+
+ if (flush)
+ wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ else if (insert)
+ wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
+
+ return insert && !flush;
+}
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -64,6 +165,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
if (size > WACOM_PKGLEN_MAX)
return 1;
+ if (wacom_wac_pen_serial_enforce(hdev, report, raw_data, size))
+ return -1;
+
memcpy(wacom->wacom_wac.data, raw_data, size);
wacom_wac_irq(&wacom->wacom_wac, size);
@@ -2347,23 +2451,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
int i;
unsigned long flags;
- spin_lock_irqsave(&remote->remote_lock, flags);
- remote->remotes[index].registered = false;
- spin_unlock_irqrestore(&remote->remote_lock, flags);
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (remote->remotes[i].serial == serial) {
- if (remote->remotes[index].battery.battery)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index].battery.bat_desc);
+ spin_lock_irqsave(&remote->remote_lock, flags);
+ remote->remotes[i].registered = false;
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
- if (remote->remotes[index].group.name)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index]);
+ if (remote->remotes[i].battery.battery)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i].battery.bat_desc);
+
+ if (remote->remotes[i].group.name)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i]);
- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- if (remote->remotes[i].serial == serial) {
remote->remotes[i].serial = 0;
remote->remotes[i].group.name = NULL;
- remote->remotes[i].registered = false;
remote->remotes[i].battery.battery = NULL;
wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
}
@@ -2580,6 +2684,10 @@ static int wacom_probe(struct hid_device *hdev,
goto fail;
}
+ error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error)
+ goto fail;
+
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
@@ -2643,6 +2751,8 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
+ kfifo_free(&wacom_wac->pen_fifo);
+
hid_set_drvdata(hdev, NULL);
}
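
The pen queue added above is a record kfifo (kfifo_rec_ptr_2), which prefixes every entry with a two-byte length so a single kfifo_out() pops exactly one queued report. A reduced sketch of the insert-with-drop policy used by wacom_wac_queue_insert(), with the fifo and sizes invented for illustration:

    #include <linux/kfifo.h>

    static struct kfifo_rec_ptr_2 example_fifo;   /* 2-byte record headers */

    static int example_setup(void)
    {
            return kfifo_alloc(&example_fifo, 4096, GFP_KERNEL);
    }

    static void example_queue(u8 *report, unsigned int len)
    {
            /* Drop oldest records until the new one fits, as above. */
            while (kfifo_avail(&example_fifo) < len)
                    kfifo_skip(&example_fifo);

            kfifo_in(&example_fifo, report, len);
    }
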
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 16af6886e828..90c38a0523e9 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
struct wacom_features *features = &wacom_wac->features;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
int i;
- bool is_touch_on = value;
bool do_report = false;
/*
@@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
break;
case WACOM_HID_WD_MUTE_DEVICE:
- if (wacom_wac->shared->touch_input && value) {
- wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on;
- is_touch_on = wacom_wac->shared->is_touch_on;
- }
-
- /* fall through*/
case WACOM_HID_WD_TOUCHONOFF:
if (wacom_wac->shared->touch_input) {
+ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
+
+ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
+ *is_touch_on = !(*is_touch_on);
+ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
+ *is_touch_on = value;
+
input_report_switch(wacom_wac->shared->touch_input,
- SW_MUTE_DEVICE, !is_touch_on);
+ SW_MUTE_DEVICE, !(*is_touch_on));
input_sync(wacom_wac->shared->touch_input);
}
break;
@@ -2085,7 +2085,29 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0);
break;
case HID_DG_TOOLSERIALNUMBER:
+ features->quirks |= WACOM_QUIRK_TOOLSERIAL;
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
+
+ /* Adjust AES usages to match modern convention */
+ if (usage->hid == WACOM_HID_WT_SERIALNUMBER && field->report_size == 16) {
+ if (field->index + 2 < field->report->maxfield) {
+ struct hid_field *a = field->report->field[field->index + 1];
+ struct hid_field *b = field->report->field[field->index + 2];
+
+ if (a->maxusage > 0 && a->usage[0].hid == HID_DG_TOOLSERIALNUMBER && a->report_size == 32 &&
+ b->maxusage > 0 && b->usage[0].hid == 0xFF000000 && b->report_size == 8) {
+ features->quirks |= WACOM_QUIRK_AESPEN;
+ usage->hid = WACOM_HID_WD_TOOLTYPE;
+ field->logical_minimum = S16_MIN;
+ field->logical_maximum = S16_MAX;
+ a->logical_minimum = S32_MIN;
+ a->logical_maximum = S32_MAX;
+ b->usage[0].hid = WACOM_HID_WD_SERIALHI;
+ b->logical_minimum = 0;
+ b->logical_maximum = U8_MAX;
+ }
+ }
+ }
break;
case WACOM_HID_WD_SENSE:
features->quirks |= WACOM_QUIRK_SENSE;
@@ -2093,15 +2115,18 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
break;
case WACOM_HID_WD_SERIALHI:
wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
- set_bit(EV_KEY, input->evbit);
- input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
- input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
- input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
- input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
- input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
- if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
- input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
- input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+
+ if (!(features->quirks & WACOM_QUIRK_AESPEN)) {
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ }
}
break;
case WACOM_HID_WD_FINGERWHEEL:
@@ -4390,6 +4415,12 @@ static const struct wacom_features wacom_features_0x360 =
static const struct wacom_features wacom_features_0x361 =
{ "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
+static const struct wacom_features wacom_features_0x37A =
+ { "Wacom One by Wacom S", 15200, 9500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x37B =
+ { "Wacom One by Wacom M", 21600, 13500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4558,6 +4589,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x343) },
{ BT_DEVICE_WACOM(0x360) },
{ BT_DEVICE_WACOM(0x361) },
+ { USB_DEVICE_WACOM(0x37A) },
+ { USB_DEVICE_WACOM(0x37B) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 64d8f014602e..15d9c14fbdf7 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/hid.h>
+#include <linux/kfifo.h>
/* maximum packet length for USB/BT devices */
#define WACOM_PKGLEN_MAX 361
@@ -86,7 +87,9 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
#define WACOM_QUIRK_SENSE 0x0002
+#define WACOM_QUIRK_AESPEN 0x0004
#define WACOM_QUIRK_BATTERY 0x0008
+#define WACOM_QUIRK_TOOLSERIAL 0x0010
/* device types */
#define WACOM_DEVICETYPE_NONE 0x0000
@@ -107,6 +110,7 @@
#define WACOM_HID_WD_PEN (WACOM_HID_UP_WACOMDIGITIZER | 0x02)
#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALNUMBER (WACOM_HID_UP_WACOMDIGITIZER | 0x5b)
#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
@@ -150,6 +154,7 @@
#define WACOM_HID_WT_TOUCHSCREEN (WACOM_HID_UP_WACOMTOUCH | 0x04)
#define WACOM_HID_WT_TOUCHPAD (WACOM_HID_UP_WACOMTOUCH | 0x05)
#define WACOM_HID_WT_CONTACTMAX (WACOM_HID_UP_WACOMTOUCH | 0x55)
+#define WACOM_HID_WT_SERIALNUMBER (WACOM_HID_UP_WACOMTOUCH | 0x5b)
#define WACOM_HID_WT_X (WACOM_HID_UP_WACOMTOUCH | 0x130)
#define WACOM_HID_WT_Y (WACOM_HID_UP_WACOMTOUCH | 0x131)
@@ -336,6 +341,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
+ struct kfifo_rec_ptr_2 pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 727f968ac1cb..8fbbacb0fe21 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -451,11 +451,11 @@ static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
cs_release_cmd(msg);
if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
- struct timespec tspec;
+ struct timespec64 tspec;
struct cs_timestamp *tstamp =
&hi->mmap_cfg->tstamp_rx_ctrl;
- ktime_get_ts(&tspec);
+ ktime_get_ts64(&tspec);
tstamp->tv_sec = (__u32) tspec.tv_sec;
tstamp->tv_nsec = (__u32) tspec.tv_nsec;
@@ -1124,10 +1124,10 @@ static int cs_char_fasync(int fd, struct file *file, int on)
return 0;
}
-static unsigned int cs_char_poll(struct file *file, poll_table *wait)
+static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
struct cs_char *csdata = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &cs_char_data.wait, wait);
spin_lock_bh(&csdata->lock);
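
The timespec to timespec64 switch above is part of the y2038 cleanup: ktime_get_ts64() returns 64-bit seconds even on 32-bit machines, and the driver then truncates explicitly to fit its existing 32-bit mmap ABI. The same step in isolation (helper name is illustrative):

    #include <linux/types.h>
    #include <linux/timekeeping.h>

    static void example_stamp(u32 *sec, u32 *nsec)
    {
            struct timespec64 ts;

            ktime_get_ts64(&ts);    /* monotonic time, 64-bit tv_sec */

            /* Deliberate truncation to fit a legacy 32-bit user ABI. */
            *sec = (u32)ts.tv_sec;
            *nsec = (u32)ts.tv_nsec;
    }
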
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 4402a71e23f7..047959e74bb1 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -104,7 +104,7 @@ static ssize_t hvt_op_write(struct file *file, const char __user *buf,
return ret ? ret : count;
}
-static unsigned int hvt_op_poll(struct file *file, poll_table *wait)
+static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
{
struct hvutil_transport *hvt;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7ad017690e3a..ef23553ff5cb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -26,11 +26,9 @@ if HWMON
config HWMON_VID
tristate
- default n
config HWMON_DEBUG_CHIP
bool "Hardware Monitoring Chip debugging messages"
- default n
help
Say Y here if you want the I2C chip drivers to produce a bunch of
debug messages to the system log. Select this if you are having
@@ -42,7 +40,6 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
depends on AB8500_GPADC && AB8500_BM
- default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -302,7 +299,6 @@ config SENSORS_APPLESMC
select NEW_LEDS
select LEDS_CLASS
select INPUT_POLLDEV
- default n
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -678,7 +674,6 @@ config SENSORS_JC42
config SENSORS_POWR1220
tristate "Lattice POWR1220 Power Monitoring"
depends on I2C
- default n
help
If you say yes here you get access to the hardware monitoring
functions of the Lattice POWR1220 isp Power Supply Monitoring,
@@ -702,7 +697,6 @@ config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC2945
I2C System Monitor.
@@ -727,7 +721,6 @@ config SENSORS_LTC2990
config SENSORS_LTC4151
tristate "Linear Technology LTC4151"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4151
High Voltage I2C Current and Voltage Monitor interface.
@@ -738,7 +731,6 @@ config SENSORS_LTC4151
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4215
Hot Swap Controller I2C interface.
@@ -750,7 +742,6 @@ config SENSORS_LTC4222
tristate "Linear Technology LTC4222"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4222
Dual Hot Swap Controller I2C interface.
@@ -761,7 +752,6 @@ config SENSORS_LTC4222
config SENSORS_LTC4245
tristate "Linear Technology LTC4245"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4245
Multiple Supply Hot Swap Controller I2C interface.
@@ -773,7 +763,6 @@ config SENSORS_LTC4260
tristate "Linear Technology LTC4260"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4260
Positive Voltage Hot Swap Controller I2C interface.
@@ -784,7 +773,6 @@ config SENSORS_LTC4260
config SENSORS_LTC4261
tristate "Linear Technology LTC4261"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4261
Negative Voltage Hot Swap Controller I2C interface.
@@ -1276,7 +1264,6 @@ config SENSORS_NSA320
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
- default n
help
If you say yes here you get support for Philips PCF8591 4-channel
ADC, 1-channel DAC chips.
@@ -1459,7 +1446,6 @@ config SENSORS_SMSC47B397
config SENSORS_SCH56XX_COMMON
tristate
- default n
config SENSORS_SCH5627
tristate "SMSC SCH5627"
@@ -1505,7 +1491,6 @@ config SENSORS_STTS751
config SENSORS_SMM665
tristate "Summit Microelectronics SMM665"
depends on I2C
- default n
help
If you say yes here you get support for the hardware monitoring
features of the Summit Microelectronics SMM665/SMM665B Six-Channel
@@ -1725,6 +1710,16 @@ config SENSORS_VT8231
This driver can also be built as a module. If so, the module
will be called vt8231.
+config SENSORS_W83773G
+ tristate "Nuvoton W83773G"
+ depends on I2C
+ help
+ If you say yes here you get support for the Nuvoton W83773G hardware
+ monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called w83773g.
+
config SENSORS_W83781D
tristate "Winbond W83781D, W83782D, W83783S, Asus AS99127F"
depends on I2C
@@ -1782,7 +1777,6 @@ config SENSORS_W83795
config SENSORS_W83795_FANCTRL
bool "Include automatic fan control support (DANGEROUS)"
depends on SENSORS_W83795
- default n
help
If you say yes here, support for automatic fan speed control
will be included in the driver.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 0fe489fab663..f814b4ace138 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
# asb100, then w83781d go first, as they can override other drivers' addresses.
obj-$(CONFIG_SENSORS_ASB100) += asb100.o
obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
+obj-$(CONFIG_SENSORS_W83773G) += w83773g.o
obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
obj-$(CONFIG_SENSORS_W83793) += w83793.o
obj-$(CONFIG_SENSORS_W83795) += w83795.o
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 63a95e23ca81..693a3d53cab5 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
@@ -181,6 +182,7 @@ struct aspeed_cooling_device {
struct aspeed_pwm_tacho_data {
struct regmap *regmap;
+ struct reset_control *rst;
unsigned long clk_freq;
bool pwm_present[8];
bool fan_tach_present[16];
@@ -905,6 +907,13 @@ static int aspeed_create_fan(struct device *dev,
return 0;
}
+static void aspeed_pwm_tacho_remove(void *data)
+{
+ struct aspeed_pwm_tacho_data *priv = data;
+
+ reset_control_assert(priv->rst);
+}
+
static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -931,6 +940,19 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ ret = devm_add_action_or_reset(dev, aspeed_pwm_tacho_remove, priv);
+ if (ret)
+ return ret;
+
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE_EXT, 0);
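
The probe change above pairs devm_reset_control_get_exclusive() with devm_add_action_or_reset(), so the reset line is re-asserted automatically on probe failure or driver unbind without an explicit .remove handler. The bare pattern (names illustrative):

    #include <linux/reset.h>

    static void example_assert_reset(void *data)
    {
            reset_control_assert(data);
    }

    static int example_enable_with_cleanup(struct device *dev)
    {
            struct reset_control *rst;

            rst = devm_reset_control_get_exclusive(dev, NULL);
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            reset_control_deassert(rst);

            /* Registers example_assert_reset() to run on driver detach. */
            return devm_add_action_or_reset(dev, example_assert_reset, rst);
    }
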
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c13a4fd86b3c..4bdbf77f7197 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -246,7 +246,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
int err;
u32 eax, edx;
int i;
- struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ u16 devfn = PCI_DEVFN(0, 0);
+ struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);
/*
* Explicit tjmax table entries override heuristics.
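
pci_get_bus_and_slot() hard-codes PCI domain 0 and is being phased out; pci_get_domain_bus_and_slot() makes the domain explicit, which is the whole of the coretemp change above. The equivalent lookup as a one-liner (the caller still owns the reference):

    #include <linux/pci.h>

    /* Host bridge at 0000:00:00.0; release with pci_dev_put() when done. */
    static struct pci_dev *example_host_bridge(void)
    {
            return pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
    }
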
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c7c9e95e58a8..bf3bb7e1adab 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -76,6 +76,7 @@ static uint i8k_fan_mult = I8K_FAN_MULT;
static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
+static bool disallow_fan_support;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -242,6 +243,9 @@ static int i8k_get_fan_status(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
@@ -253,6 +257,9 @@ static int i8k_get_fan_speed(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -264,7 +271,7 @@ static int _i8k_get_fan_type(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
- if (disallow_fan_type_call)
+ if (disallow_fan_support || disallow_fan_type_call)
return -EINVAL;
regs.ebx = fan & 0xff;
@@ -289,6 +296,9 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = (fan & 0xff) | (speed << 8);
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -300,6 +310,9 @@ static int i8k_set_fan(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
regs.ebx = (fan & 0xff) | (speed << 8);
@@ -772,6 +785,8 @@ static struct attribute *i8k_attrs[] = {
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
+ if (disallow_fan_support && index >= 8)
+ return 0;
if (disallow_fan_type_call &&
(index == 9 || index == 12 || index == 15))
return 0;
@@ -1039,6 +1054,30 @@ static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst
};
/*
+ * On some machines, all fan-related SMM functions implemented by the Dell
+ * BIOS firmware freeze the kernel for about 500ms. Until Dell fixes these
+ * problems, fan support for the affected blacklisted machines stays disabled.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=195751
+ */
+static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
+ {
+ .ident = "Dell Inspiron 7720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
+ },
+ },
+ {
+ .ident = "Dell Vostro 3360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
+ },
+ },
+ { }
+};
+
+/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
@@ -1060,8 +1099,17 @@ static int __init i8k_probe(void)
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
- disallow_fan_type_call = true;
+ if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan support\n");
+ if (!force)
+ disallow_fan_support = true;
+ }
+
+ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan type call\n");
+ if (!force)
+ disallow_fan_type_call = true;
+ }
strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
sizeof(bios_version));
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 7b73d2002d3e..0ae1ee1dbf76 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -37,7 +37,7 @@
/**
* struct hih6130 - HIH-6130 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: pointer to I2C client device
* @lock: mutex to protect measurement values
* @valid: only false before first measurement is taken
* @last_update: time of last update (jiffies)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index af5123042990..32083e452cde 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -678,7 +678,7 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: pointer to hwmon chip information
+ * @chip: pointer to hwmon chip information
* @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
@@ -785,11 +785,11 @@ EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
/**
* devm_hwmon_device_register_with_info - register w/ hwmon
- * @dev: the parent device
- * @name: hwmon name attribute
- * @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: pointer to hwmon chip information
+ * @groups: pointer to list of driver specific attribute groups
*
* Returns the pointer to the new device. The new device is automatically
* unregistered with the parent device.
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index f6a76679c650..5e5b32a1ec4b 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -23,7 +23,8 @@
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
* @hwmon_dev: associated hwmon device
- * @attr_group: the group of attributes
+ * @attr_group: the group of attributes
+ * @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
*/
struct iio_hwmon_state {
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 62e38fa8cda2..e9e6aeabbf84 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -116,21 +118,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when the hardware calculates the current register.
+ * According to the datasheet (eq. 3) the best values are 2048 for the
+ * ina226 and 4096 for the ina219. They are hardcoded as calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * To keep the calibration register value fixed, the product of
+ * current_lsb and shunt_resistor must also stay fixed, equal to
+ * shunt_voltage_lsb = 1 / shunt_div scaled by 10^9 so that the
+ * units are preserved.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
+
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -438,6 +454,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[chip];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -448,10 +465,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -467,8 +481,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
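
With the calibration register pinned to the datasheet constant, the current and power LSBs now follow from the shunt value alone. The arithmetic of ina2xx_set_shunt() reduced to a worked example, using the INA226 values from the diff (shunt_div = 400, power_lsb_factor = 25); a 10000 uOhm shunt then yields a 250 uA current LSB and a 6250 uW power LSB:

    #include <linux/kernel.h>       /* DIV_ROUND_CLOSEST() */

    static void example_ina226_lsbs(long rshunt_uohm,
                                    long *current_lsb_uA, long *power_lsb_uW)
    {
            /* shunt voltage LSB is 1/400 uV; 10^9 / 400 = 2500000 keeps
             * the quotient in uA for a shunt given in uOhm. */
            long dividend = DIV_ROUND_CLOSEST(1000000000, 400);

            *current_lsb_uA = DIV_ROUND_CLOSEST(dividend, rshunt_uohm);
            *power_lsb_uW = 25 * *current_lsb_uA;
    }
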
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 0721e175664a..06b4e1c78bd8 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -86,6 +86,7 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1950", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1920", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 005ffb5ffa92..49f4b33a5685 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -100,7 +100,7 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_chip_update_interval:
*val = data->sample_time;
- break;;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 08479006c7f9..6e4298e99222 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -39,6 +39,7 @@ config SENSORS_ADM1275
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
+ depends on LEDS_CLASS
help
If you say yes here you get hardware monitoring support for the IBM
Common Form Factor power supply.
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index cb56da6834e5..93d9a9ea112b 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -8,12 +8,29 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/fs.h>
#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pmbus.h>
#include "pmbus.h"
+#define CFFPS_FRU_CMD 0x9A
+#define CFFPS_PN_CMD 0x9B
+#define CFFPS_SN_CMD 0x9E
+#define CFFPS_CCIN_CMD 0xBD
+#define CFFPS_FW_CMD_START 0xFA
+#define CFFPS_FW_NUM_BYTES 4
+#define CFFPS_SYS_CONFIG_CMD 0xDA
+
+#define CFFPS_INPUT_HISTORY_CMD 0xD6
+#define CFFPS_INPUT_HISTORY_SIZE 100
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -24,6 +41,153 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+#define CFFPS_LED_BLINK BIT(0)
+#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
+#define CFFPS_BLINK_RATE_MS 250
+
+enum {
+ CFFPS_DEBUGFS_INPUT_HISTORY = 0,
+ CFFPS_DEBUGFS_FRU,
+ CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_CCIN,
+ CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_NUM_ENTRIES
+};
+
+struct ibm_cffps_input_history {
+ struct mutex update_lock;
+ unsigned long last_update;
+
+ u8 byte_count;
+ u8 data[CFFPS_INPUT_HISTORY_SIZE];
+};
+
+struct ibm_cffps {
+ struct i2c_client *client;
+
+ struct ibm_cffps_input_history input_history;
+
+ int debugfs_entries[CFFPS_DEBUGFS_NUM_ENTRIES];
+
+ char led_name[32];
+ u8 led_state;
+ struct led_classdev led;
+};
+
+#define to_psu(x, y) container_of((x), struct ibm_cffps, debugfs_entries[(y)])
+
+static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ int rc;
+ u8 msgbuf0[1] = { CFFPS_INPUT_HISTORY_CMD };
+ u8 msgbuf1[CFFPS_INPUT_HISTORY_SIZE + 1] = { 0 };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags,
+ .len = 1,
+ .buf = msgbuf0,
+ }, {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags | I2C_M_RD,
+ .len = CFFPS_INPUT_HISTORY_SIZE + 1,
+ .buf = msgbuf1,
+ },
+ };
+
+ if (!*ppos) {
+ mutex_lock(&psu->input_history.update_lock);
+ if (time_after(jiffies, psu->input_history.last_update + HZ)) {
+ /*
+ * Use a raw i2c transfer, since we need more bytes
+ * than Linux I2C supports through smbus xfr (only 32).
+ */
+ rc = i2c_transfer(psu->client->adapter, msg, 2);
+ if (rc < 0) {
+ mutex_unlock(&psu->input_history.update_lock);
+ return rc;
+ }
+
+ psu->input_history.byte_count = msgbuf1[0];
+ memcpy(psu->input_history.data, &msgbuf1[1],
+ CFFPS_INPUT_HISTORY_SIZE);
+ psu->input_history.last_update = jiffies;
+ }
+
+ mutex_unlock(&psu->input_history.update_lock);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos,
+ psu->input_history.data,
+ psu->input_history.byte_count);
+}
+
+static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 cmd;
+ int i, rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+ char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_INPUT_HISTORY:
+ return ibm_cffps_read_input_history(psu, buf, count, ppos);
+ case CFFPS_DEBUGFS_FRU:
+ cmd = CFFPS_FRU_CMD;
+ break;
+ case CFFPS_DEBUGFS_PN:
+ cmd = CFFPS_PN_CMD;
+ break;
+ case CFFPS_DEBUGFS_SN:
+ cmd = CFFPS_SN_CMD;
+ break;
+ case CFFPS_DEBUGFS_CCIN:
+ rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 5, "%04X", rc);
+ goto done;
+ case CFFPS_DEBUGFS_FW:
+ for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
+ rc = i2c_smbus_read_byte_data(psu->client,
+ CFFPS_FW_CMD_START + i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 2], 3, "%02X", rc);
+ }
+
+ rc = i * 2;
+ goto done;
+ default:
+ return -EINVAL;
+ }
+
+ rc = i2c_smbus_read_block_data(psu->client, cmd, data);
+ if (rc < 0)
+ return rc;
+
+done:
+	data[rc] = '\n';
+	rc += 2;	/* include the trailing newline and NUL terminator */
+
+ return simple_read_from_buffer(buf, count, ppos, data, rc);
+}
+
+static const struct file_operations ibm_cffps_fops = {
+ .llseek = noop_llseek,
+ .read = ibm_cffps_debugfs_op,
+ .open = simple_open,
+};
+
static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
int reg)
{
@@ -105,6 +269,69 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
+static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ if (brightness == LED_OFF) {
+ psu->led_state = CFFPS_LED_OFF;
+ } else {
+ brightness = LED_FULL;
+ if (psu->led_state != CFFPS_LED_BLINK)
+ psu->led_state = CFFPS_LED_ON;
+ }
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ psu->led_state);
+ if (rc < 0)
+ return;
+
+ led_cdev->brightness = brightness;
+}
+
+static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ psu->led_state = CFFPS_LED_BLINK;
+
+ if (led_cdev->brightness == LED_OFF)
+ return 0;
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_BLINK);
+ if (rc < 0)
+ return rc;
+
+ *delay_on = CFFPS_BLINK_RATE_MS;
+ *delay_off = CFFPS_BLINK_RATE_MS;
+
+ return 0;
+}
+
+static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
+{
+ int rc;
+ struct i2c_client *client = psu->client;
+ struct device *dev = &client->dev;
+
+ snprintf(psu->led_name, sizeof(psu->led_name), "%s-%02x", client->name,
+ client->addr);
+ psu->led.name = psu->led_name;
+ psu->led.max_brightness = LED_FULL;
+ psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.blink_set = ibm_cffps_led_blink_set;
+
+ rc = devm_led_classdev_register(dev, &psu->led);
+ if (rc)
+ dev_warn(dev, "failed to register led class: %d\n", rc);
+}
+
static struct pmbus_driver_info ibm_cffps_info = {
.pages = 1,
.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
@@ -116,10 +343,69 @@ static struct pmbus_driver_info ibm_cffps_info = {
.read_word_data = ibm_cffps_read_word_data,
};
+static struct pmbus_platform_data ibm_cffps_pdata = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return pmbus_do_probe(client, id, &ibm_cffps_info);
+ int i, rc;
+ struct dentry *debugfs;
+ struct dentry *ibm_cffps_dir;
+ struct ibm_cffps *psu;
+
+ client->dev.platform_data = &ibm_cffps_pdata;
+ rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+ if (rc)
+ return rc;
+
+ /*
+ * Don't fail the probe if there isn't enough memory for leds and
+ * debugfs.
+ */
+ psu = devm_kzalloc(&client->dev, sizeof(*psu), GFP_KERNEL);
+ if (!psu)
+ return 0;
+
+ psu->client = client;
+ mutex_init(&psu->input_history.update_lock);
+ psu->input_history.last_update = jiffies - HZ;
+
+ ibm_cffps_create_led_class(psu);
+
+ /* Don't fail the probe if we can't create debugfs */
+ debugfs = pmbus_get_debugfs_dir(client);
+ if (!debugfs)
+ return 0;
+
+ ibm_cffps_dir = debugfs_create_dir(client->name, debugfs);
+ if (!ibm_cffps_dir)
+ return 0;
+
+ for (i = 0; i < CFFPS_DEBUGFS_NUM_ENTRIES; ++i)
+ psu->debugfs_entries[i] = i;
+
+ debugfs_create_file("input_history", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_INPUT_HISTORY],
+ &ibm_cffps_fops);
+ debugfs_create_file("fru", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FRU],
+ &ibm_cffps_fops);
+ debugfs_create_file("part_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_PN],
+ &ibm_cffps_fops);
+ debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_SN],
+ &ibm_cffps_fops);
+ debugfs_create_file("ccin", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
+ &ibm_cffps_fops);
+ debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FW],
+ &ibm_cffps_fops);
+
+ return 0;
}
static const struct i2c_device_id ibm_cffps_id[] = {
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 8b906b44484b..977315b0fd90 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -25,168 +25,19 @@
#define IR35221_MFR_IOUT_VALLEY 0xcb
#define IR35221_MFR_TEMP_VALLEY 0xcc
-static long ir35221_reg2data(int data, enum pmbus_sensor_classes class)
-{
- s16 exponent;
- s32 mantissa;
- long val;
-
- /* We only modify LINEAR11 formats */
- exponent = ((s16)data) >> 11;
- mantissa = ((s16)((data & 0x7ff) << 5)) >> 5;
-
- val = mantissa * 1000L;
-
- /* scale result to micro-units for power sensors */
- if (class == PSC_POWER)
- val = val * 1000L;
-
- if (exponent >= 0)
- val <<= exponent;
- else
- val >>= -exponent;
-
- return val;
-}
-
-#define MAX_MANTISSA (1023 * 1000)
-#define MIN_MANTISSA (511 * 1000)
-
-static u16 ir35221_data2reg(long val, enum pmbus_sensor_classes class)
-{
- s16 exponent = 0, mantissa;
- bool negative = false;
-
- if (val == 0)
- return 0;
-
- if (val < 0) {
- negative = true;
- val = -val;
- }
-
- /* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
- val = DIV_ROUND_CLOSEST(val, 1000L);
-
- /* Reduce large mantissa until it fits into 10 bit */
- while (val >= MAX_MANTISSA && exponent < 15) {
- exponent++;
- val >>= 1;
- }
- /* Increase small mantissa to improve precision */
- while (val < MIN_MANTISSA && exponent > -15) {
- exponent--;
- val <<= 1;
- }
-
- /* Convert mantissa from milli-units to units */
- mantissa = DIV_ROUND_CLOSEST(val, 1000);
-
- /* Ensure that resulting number is within range */
- if (mantissa > 0x3ff)
- mantissa = 0x3ff;
-
- /* restore sign */
- if (negative)
- mantissa = -mantissa;
-
- /* Convert to 5 bit exponent, 11 bit mantissa */
- return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
-}
-
-static u16 ir35221_scale_result(s16 data, int shift,
- enum pmbus_sensor_classes class)
-{
- long val;
-
- val = ir35221_reg2data(data, class);
-
- if (shift < 0)
- val >>= -shift;
- else
- val <<= shift;
-
- return ir35221_data2reg(val, class);
-}
-
static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, 1, PSC_CURRENT_OUT);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- ret = ir35221_scale_result(ret, -4, PSC_VOLTAGE_IN);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_VIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_VIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
- break;
- case PMBUS_READ_IIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IIN);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -4, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -5, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_POUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_POUT);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_PIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_PIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_IOUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IOUT);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_OUT);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_OUT);
- break;
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
@@ -194,9 +45,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_VIN_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_VIN_VALLEY);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page,
@@ -205,12 +53,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_IOUT_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_IOUT_VALLEY);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MIN:
ret = pmbus_read_word_data(client, page,
@@ -224,36 +66,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int ir35221_write_word_data(struct i2c_client *client, int page, int reg,
- u16 word)
-{
- int ret;
- u16 val;
-
- switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, -1, PSC_CURRENT_OUT);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- val = ir35221_scale_result(word, 4, PSC_VOLTAGE_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, 1, PSC_CURRENT_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- default:
- ret = -ENODATA;
- break;
- }
-
- return ret;
-}
-
static int ir35221_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -292,7 +104,6 @@ static int ir35221_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- info->write_word_data = ir35221_write_word_data;
info->read_word_data = ir35221_read_word_data;
info->pages = 2;
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 10d17fb8f283..53db78753a0d 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -1,5 +1,5 @@
/*
- * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
+enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -53,11 +53,6 @@ enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
#define LM25056_MFR_STS_VAUX_OV_WARN BIT(1)
#define LM25056_MFR_STS_VAUX_UV_WARN BIT(0)
-/* LM25063 only */
-
-#define LM25063_READ_VOUT_MAX 0xe5
-#define LM25063_READ_VOUT_MIN 0xe6
-
struct __coeff {
short m, b, R;
};
@@ -122,36 +117,6 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
- [lm25063] = {
- [PSC_VOLTAGE_IN] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_VOLTAGE_OUT] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_CURRENT_IN] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_CURRENT_IN_L] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_POWER] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_POWER_L] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_TEMPERATURE] = {
- .m = 15596,
- .R = -3,
- },
- },
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
@@ -272,10 +237,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
/* VIN: 6.14 mV VAUX: 293 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
- case lm25063:
- /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
- ret = DIV_ROUND_CLOSEST(ret * 20, 625);
- break;
case lm25066:
/* VIN: 4.54 mV VAUX: 283.2 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -330,24 +291,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
-{
- int ret;
-
- switch (reg) {
- case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
- break;
- case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
- break;
- default:
- ret = lm25066_read_word_data(client, page, reg);
- break;
- }
- return ret;
-}
-
static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -502,11 +445,6 @@ static int lm25066_probe(struct i2c_client *client,
info->read_word_data = lm25056_read_word_data;
info->read_byte_data = lm25056_read_byte_data;
data->rlimit = 0x0fff;
- } else if (data->id == lm25063) {
- info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
- | PMBUS_HAVE_POUT;
- info->read_word_data = lm25063_read_word_data;
- data->rlimit = 0xffff;
} else {
info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
info->read_word_data = lm25066_read_word_data;
@@ -543,7 +481,6 @@ static int lm25066_probe(struct i2c_client *client,
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
- {"lm25063", lm25063},
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 9313849d5160..c9dc8799b5e1 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -16,12 +16,231 @@
enum max31785_regs {
MFR_REVISION = 0x9b,
+ MFR_FAN_CONFIG = 0xf1,
};
+#define MAX31785 0x3030
+#define MAX31785A 0x3040
+
+#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
+
#define MAX31785_NR_PAGES 23
+#define MAX31785_NR_FAN_PAGES 6
+
+static int max31785_read_byte_data(struct i2c_client *client, int page,
+ int reg)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ return -ENOTSUPP;
+ case PMBUS_FAN_CONFIG_12:
+ return pmbus_read_byte_data(client, page - MAX31785_NR_PAGES,
+ reg);
+ }
+
+ return -ENODATA;
+}
+
+static int max31785_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ return -ENOTSUPP;
+}
+
+static int max31785_read_long_data(struct i2c_client *client, int page,
+ int reg, u32 *data)
+{
+ unsigned char cmdbuf[1];
+ unsigned char rspbuf[4];
+ int rc;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmdbuf),
+ .buf = cmdbuf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(rspbuf),
+ .buf = rspbuf,
+ },
+ };
+
+ cmdbuf[0] = reg;
+
+ rc = pmbus_set_page(client, page);
+ if (rc < 0)
+ return rc;
+
+ rc = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (rc < 0)
+ return rc;
+
+ *data = (rspbuf[0] << (0 * 8)) | (rspbuf[1] << (1 * 8)) |
+ (rspbuf[2] << (2 * 8)) | (rspbuf[3] << (3 * 8));
+
+ return rc;
+}
+
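The shift-and-OR above assembles the four response bytes into a little-endian 32-bit word. As a stylistic alternative (not what the driver uses), the same result could be obtained with the kernel's unaligned helpers:

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /* Little-endian assembly of the 4-byte response buffer. */
    static u32 max31785_rsp_to_u32(const u8 rspbuf[4])
    {
            return get_unaligned_le32(rspbuf);
    }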
+static int max31785_get_pwm(struct i2c_client *client, int page)
+{
+ int rv;
+
+ rv = pmbus_get_fan_rate_device(client, page, 0, percent);
+ if (rv < 0)
+ return rv;
+ else if (rv >= 0x8000)
+ return 0;
+ else if (rv >= 0x2711)
+ return 0x2710;
+
+ return rv;
+}
+
+static int max31785_get_pwm_mode(struct i2c_client *client, int page)
+{
+ int config;
+ int command;
+
+ config = pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
+ if (config < 0)
+ return config;
+
+ command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1);
+ if (command < 0)
+ return command;
+
+ if (config & PB_FAN_1_RPM)
+ return (command >= 0x8000) ? 3 : 2;
+
+ if (command >= 0x8000)
+ return 3;
+ else if (command >= 0x2711)
+ return 0;
+
+ return 1;
+}
+
+static int max31785_read_word_data(struct i2c_client *client, int page,
+ int reg)
+{
+ u32 val;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_READ_FAN_SPEED_1:
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ rv = max31785_read_long_data(client, page - MAX31785_NR_PAGES,
+ reg, &val);
+ if (rv < 0)
+ return rv;
+
+ rv = (val >> 16) & 0xffff;
+ break;
+ case PMBUS_FAN_COMMAND_1:
+ /*
+ * PMBUS_FAN_COMMAND_x is probed to judge whether or not to
+ * expose fan control registers.
+ *
+ * Don't expose fan_target attribute for virtual pages.
+ */
+ rv = (page >= MAX31785_NR_PAGES) ? -ENOTSUPP : -ENODATA;
+ break;
+ case PMBUS_VIRT_PWM_1:
+ rv = max31785_get_pwm(client, page);
+ break;
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ rv = max31785_get_pwm_mode(client, page);
+ break;
+ default:
+ rv = -ENODATA;
+ break;
+ }
+
+ return rv;
+}
+
+static inline u32 max31785_scale_pwm(u32 sensor_val)
+{
+ /*
+ * The datasheet describes the accepted value range for manual PWM as
+ * [0, 0x2710], while the hwmon pwmX sysfs interface accepts values in
+ * [0, 255]. The MAX31785 uses DIRECT mode to scale the FAN_COMMAND
+ * registers and in PWM mode the coefficients are m=1, b=0, R=2. The
+ * important observation here is that 0x2710 == 10000 == 100 * 100.
+ *
+ * R=2 (== 10^2 == 100) accounts for scaling the value provided at the
+ * sysfs interface into the required hardware resolution, but it does
+ * not yet yield a value that we can write to the device (this initial
+ * scaling is handled by pmbus_data2reg()). Multiplying by 100 below
+ * translates the parameter value into the percentage units required by
+	 * PMBus, and dividing by 255 scales the hwmon pwmX range back down to
+	 * yield the percentage value at the appropriate resolution for the
+	 * hardware.
+ */
+ return (sensor_val * 100) / 255;
+}
+
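A quick arithmetic check of the pipeline described in the comment above, as a freestanding sketch: pmbus_data2reg() applies the DIRECT coefficients first (a factor of 100 for R=2), and max31785_scale_pwm() then maps the result onto the device's [0, 0x2710] range, so a sysfs write of 255 lands exactly on 0x2710 (100.00%):

    #include <assert.h>

    /* pmbus_data2reg() step for PSC_PWM: DIRECT, m=1, b=0, R=2 -> Y = X * 100 */
    static unsigned int data2reg_direct(unsigned int val)
    {
            return val * 100;
    }

    /* Mirrors max31785_scale_pwm() */
    static unsigned int scale_pwm(unsigned int sensor_val)
    {
            return (sensor_val * 100) / 255;
    }

    int main(void)
    {
            assert(scale_pwm(data2reg_direct(255)) == 0x2710); /* full speed */
            assert(scale_pwm(data2reg_direct(0)) == 0);        /* off */
            return 0;
    }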
+static int max31785_pwm_enable(struct i2c_client *client, int page,
+ u16 word)
+{
+ int config = 0;
+ int rate;
+
+ switch (word) {
+ case 0:
+ rate = 0x7fff;
+ break;
+ case 1:
+ rate = pmbus_get_fan_rate_cached(client, page, 0, percent);
+ if (rate < 0)
+ return rate;
+ rate = max31785_scale_pwm(rate);
+ break;
+ case 2:
+ config = PB_FAN_1_RPM;
+ rate = pmbus_get_fan_rate_cached(client, page, 0, rpm);
+ if (rate < 0)
+ return rate;
+ break;
+ case 3:
+ rate = 0xffff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_update_fan(client, page, 0, config, PB_FAN_1_RPM, rate);
+}
+
+static int max31785_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_PWM_1:
+ return pmbus_update_fan(client, page, 0, 0, PB_FAN_1_RPM,
+ max31785_scale_pwm(word));
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ return max31785_pwm_enable(client, page, word);
+ default:
+ break;
+ }
+
+ return -ENODATA;
+}
#define MAX31785_FAN_FUNCS \
- (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_PWM12)
#define MAX31785_TEMP_FUNCS \
(PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
@@ -29,14 +248,26 @@ enum max31785_regs {
#define MAX31785_VOUT_FUNCS \
(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
+#define MAX31785_NUM_FAN_PAGES 6
+
static const struct pmbus_driver_info max31785_info = {
.pages = MAX31785_NR_PAGES,
+ .write_word_data = max31785_write_word_data,
+ .read_byte_data = max31785_read_byte_data,
+ .read_word_data = max31785_read_word_data,
+ .write_byte = max31785_write_byte,
+
/* RPM */
.format[PSC_FAN] = direct,
.m[PSC_FAN] = 1,
.b[PSC_FAN] = 0,
.R[PSC_FAN] = 0,
+ /* PWM */
+ .format[PSC_PWM] = direct,
+ .m[PSC_PWM] = 1,
+ .b[PSC_PWM] = 0,
+ .R[PSC_PWM] = 2,
.func[0] = MAX31785_FAN_FUNCS,
.func[1] = MAX31785_FAN_FUNCS,
.func[2] = MAX31785_FAN_FUNCS,
@@ -72,13 +303,46 @@ static const struct pmbus_driver_info max31785_info = {
.func[22] = MAX31785_VOUT_FUNCS,
};
+static int max31785_configure_dual_tach(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX31785_NR_FAN_PAGES; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_FAN_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MFR_FAN_CONFIG_DUAL_TACH) {
+ int virtual = MAX31785_NR_PAGES + i;
+
+ info->pages = virtual + 1;
+ info->func[virtual] |= PMBUS_HAVE_FAN12;
+ info->func[virtual] |= PMBUS_PAGE_VIRTUAL;
+ }
+ }
+
+ return 0;
+}
+
static int max31785_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
+ bool dual_tach = false;
s64 ret;
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -89,6 +353,25 @@ static int max31785_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ ret = i2c_smbus_read_word_data(client, MFR_REVISION);
+ if (ret < 0)
+ return ret;
+
+ if (ret == MAX31785A) {
+ dual_tach = true;
+ } else if (ret == MAX31785) {
+ if (!strcmp("max31785a", id->name))
+			dev_warn(dev, "Expected max31785a, found max31785: cannot provide secondary tachometer readings\n");
+ } else {
+ return -ENODEV;
+ }
+
+ if (dual_tach) {
+ ret = max31785_configure_dual_tach(client, info);
+ if (ret < 0)
+ return ret;
+ }
+
return pmbus_do_probe(client, id, info);
}
@@ -100,9 +383,18 @@ static const struct i2c_device_id max31785_id[] = {
MODULE_DEVICE_TABLE(i2c, max31785_id);
+static const struct of_device_id max31785_of_match[] = {
+ { .compatible = "maxim,max31785" },
+ { .compatible = "maxim,max31785a" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max31785_of_match);
+
static struct i2c_driver max31785_driver = {
.driver = {
.name = "max31785",
+ .of_match_table = max31785_of_match,
},
.probe = max31785_probe,
.remove = pmbus_do_remove,
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index fa613bd209e3..1d24397d36ec 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -190,6 +190,33 @@ enum pmbus_regs {
PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
PMBUS_VIRT_STATUS_VMON,
+
+ /*
+ * RPM and PWM Fan control
+ *
+ * Drivers wanting to expose PWM control must define the behaviour of
+ * PMBUS_VIRT_PWM_[1-4] and PMBUS_VIRT_PWM_ENABLE_[1-4] in the
+ * {read,write}_word_data callback.
+ *
+ * pmbus core provides a default implementation for
+ * PMBUS_VIRT_FAN_TARGET_[1-4].
+ *
+ * TARGET, PWM and PWM_ENABLE members must be defined sequentially;
+ * pmbus core uses the difference between the provided register and
+ * it's _1 counterpart to calculate the FAN/PWM ID.
+	 * its _1 counterpart to calculate the FAN/PWM ID.
+ PMBUS_VIRT_FAN_TARGET_1,
+ PMBUS_VIRT_FAN_TARGET_2,
+ PMBUS_VIRT_FAN_TARGET_3,
+ PMBUS_VIRT_FAN_TARGET_4,
+ PMBUS_VIRT_PWM_1,
+ PMBUS_VIRT_PWM_2,
+ PMBUS_VIRT_PWM_3,
+ PMBUS_VIRT_PWM_4,
+ PMBUS_VIRT_PWM_ENABLE_1,
+ PMBUS_VIRT_PWM_ENABLE_2,
+ PMBUS_VIRT_PWM_ENABLE_3,
+ PMBUS_VIRT_PWM_ENABLE_4,
};
/*
@@ -223,6 +250,8 @@ enum pmbus_regs {
#define PB_FAN_1_RPM BIT(6)
#define PB_FAN_1_INSTALLED BIT(7)
+enum pmbus_fan_mode { percent = 0, rpm };
+
/*
* STATUS_BYTE, STATUS_WORD (lower)
*/
@@ -313,6 +342,7 @@ enum pmbus_sensor_classes {
PSC_POWER,
PSC_TEMPERATURE,
PSC_FAN,
+ PSC_PWM,
PSC_NUM_CLASSES /* Number of power sensor classes */
};
@@ -339,6 +369,10 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_FAN34 BIT(17)
#define PMBUS_HAVE_VMON BIT(18)
#define PMBUS_HAVE_STATUS_VMON BIT(19)
+#define PMBUS_HAVE_PWM12 BIT(20)
+#define PMBUS_HAVE_PWM34 BIT(21)
+
+#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13 };
@@ -421,5 +455,12 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
int pmbus_do_remove(struct i2c_client *client);
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
*client);
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client);
#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a139940cd991..f7c47d7994e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -65,6 +65,7 @@ struct pmbus_sensor {
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
+ bool convert; /* Whether or not to apply linear/vid/direct */
int data; /* Sensor data.
Negative if there was a read error */
};
@@ -129,6 +130,27 @@ struct pmbus_debugfs_entry {
u8 reg;
};
+static const int pmbus_fan_rpm_mask[] = {
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_command_registers[] = {
+ PMBUS_FAN_COMMAND_1,
+ PMBUS_FAN_COMMAND_2,
+ PMBUS_FAN_COMMAND_3,
+ PMBUS_FAN_COMMAND_4,
+};
+
void pmbus_clear_cache(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -140,18 +162,27 @@ EXPORT_SYMBOL_GPL(pmbus_clear_cache);
int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- int rv = 0;
- int newpage;
+ int rv;
- if (page >= 0 && page != data->currpage) {
+ if (page < 0 || page == data->currpage)
+ return 0;
+
+ if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- if (newpage != page)
- rv = -EIO;
- else
- data->currpage = page;
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (rv < 0)
+ return rv;
+
+ if (rv != page)
+ return -EIO;
}
- return rv;
+
+ data->currpage = page;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
@@ -198,6 +229,28 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_GPL(pmbus_write_word_data);
+
+static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int bit;
+ int id;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ bit = pmbus_fan_rpm_mask[id];
+ rv = pmbus_update_fan(client, page, id, bit, bit, word);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -214,11 +267,38 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_write_virt_reg(client, page, reg, word);
+
return pmbus_write_word_data(client, page, reg, word);
}
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command)
+{
+ int from;
+ int rv;
+ u8 to;
+
+ from = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (from < 0)
+ return from;
+
+ to = (from & ~mask) | (config & mask);
+ if (to != from) {
+ rv = pmbus_write_byte_data(client, page,
+ pmbus_fan_config_registers[id], to);
+ if (rv < 0)
+ return rv;
+ }
+
+ return _pmbus_write_word_data(client, page,
+ pmbus_fan_command_registers[id], command);
+}
+EXPORT_SYMBOL_GPL(pmbus_update_fan);
+
int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -231,6 +311,24 @@ int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
}
EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ int id;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ rv = pmbus_get_fan_rate_device(client, page, id, rpm);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -246,8 +344,10 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_read_virt_reg(client, page, reg);
+
return pmbus_read_word_data(client, page, reg);
}
@@ -312,6 +412,68 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
return pmbus_read_byte_data(client, page, reg);
}
+static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
+ int reg)
+{
+ struct pmbus_sensor *sensor;
+
+ for (sensor = data->sensors; sensor; sensor = sensor->next) {
+ if (sensor->page == page && sensor->reg == reg)
+ return sensor;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode,
+ bool from_cache)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ bool want_rpm, have_rpm;
+ struct pmbus_sensor *s;
+ int config;
+ int reg;
+
+ want_rpm = (mode == rpm);
+
+ if (from_cache) {
+ reg = want_rpm ? PMBUS_VIRT_FAN_TARGET_1 : PMBUS_VIRT_PWM_1;
+ s = pmbus_find_sensor(data, page, reg + id);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ return s->data;
+ }
+
+ config = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (config < 0)
+ return config;
+
+ have_rpm = !!(config & pmbus_fan_rpm_mask[id]);
+ if (want_rpm == have_rpm)
+ return pmbus_read_word_data(client, page,
+ pmbus_fan_command_registers[id]);
+
+ /* Can't sensibly map between RPM and PWM, just return zero */
+ return 0;
+}
+
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, false);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_device);
+
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, true);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_cached);
+
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
_pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
@@ -513,7 +675,7 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
/* X = 1/m * (Y * 10^-R - b) */
R = -R;
/* scale result to milli-units for everything but fans */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R += 3;
b *= 1000;
}
@@ -568,6 +730,9 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
long val;
+ if (!sensor->convert)
+ return sensor->data;
+
switch (data->info->format[sensor->class]) {
case direct:
val = pmbus_reg2data_direct(data, sensor);
@@ -672,7 +837,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
/* Calculate Y = (m * X + b) * 10^R */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -703,6 +868,9 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
{
u16 regval;
+ if (!sensor->convert)
+ return val;
+
switch (data->info->format[sensor->class]) {
case direct:
regval = pmbus_data2reg_direct(data, sensor, val);
@@ -915,7 +1083,8 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
const char *name, const char *type,
int seq, int page, int reg,
enum pmbus_sensor_classes class,
- bool update, bool readonly)
+ bool update, bool readonly,
+ bool convert)
{
struct pmbus_sensor *sensor;
struct device_attribute *a;
@@ -925,12 +1094,18 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
return NULL;
a = &sensor->attribute;
- snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
- name, seq, type);
+ if (type)
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ else
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d",
+ name, seq);
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
+ sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
pmbus_show_sensor, pmbus_set_sensor);
@@ -1029,7 +1204,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client,
curr = pmbus_add_sensor(data, name, l->attr, index,
page, l->reg, attr->class,
attr->update || l->update,
- false);
+ false, true);
if (!curr)
return -ENOMEM;
if (l->sbit && (info->func[page] & attr->sfunc)) {
@@ -1068,7 +1243,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return ret;
}
base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true);
+ attr->class, true, true, true);
if (!base)
return -ENOMEM;
if (attr->sfunc) {
@@ -1592,13 +1767,6 @@ static const int pmbus_fan_registers[] = {
PMBUS_READ_FAN_SPEED_4
};
-static const int pmbus_fan_config_registers[] = {
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_34,
- PMBUS_FAN_CONFIG_34
-};
-
static const int pmbus_fan_status_registers[] = {
PMBUS_STATUS_FAN_12,
PMBUS_STATUS_FAN_12,
@@ -1621,6 +1789,42 @@ static const u32 pmbus_fan_status_flags[] = {
};
/* Fans */
+
+/* Precondition: FAN_CONFIG_x_y and FAN_COMMAND_x must exist for the fan ID */
+static int pmbus_add_fan_ctrl(struct i2c_client *client,
+ struct pmbus_data *data, int index, int page, int id,
+ u8 config)
+{
+ struct pmbus_sensor *sensor;
+
+ sensor = pmbus_add_sensor(data, "fan", "target", index, page,
+ PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
+ (data->info->func[page] & PMBUS_HAVE_PWM34)))
+ return 0;
+
+ sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
+ PMBUS_VIRT_PWM_1 + id, PSC_PWM,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
+ PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
+ true, false, false);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int pmbus_add_fan_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1655,9 +1859,18 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
if (pmbus_add_sensor(data, "fan", "input", index,
page, pmbus_fan_registers[f],
- PSC_FAN, true, true) == NULL)
+ PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
+ /* Fan control */
+ if (pmbus_check_word_register(client, page,
+ pmbus_fan_command_registers[f])) {
+ ret = pmbus_add_fan_ctrl(client, data, index,
+ page, f, regval);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Each fan status register covers multiple fans,
* so we have to do some magic.
@@ -2168,6 +2381,14 @@ int pmbus_do_remove(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->debugfs;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_debugfs_dir);
+
static int __init pmbus_core_init(void)
{
pmbus_debugfs_dir = debugfs_create_dir("pmbus", NULL);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 25d28343ba93..2be77752cd56 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -179,6 +179,7 @@ struct sht15_data {
* sht15_crc8() - compute crc8
* @data: sht15 specific data.
* @value: sht15 retrieved data.
+ * @len: length of retrieved data.
*
* This implements section 2 of the CRC datasheet.
*/
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 06706d288355..190e7b39ce32 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -41,7 +41,7 @@
/**
* struct sht21 - SHT21 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: I2C client device
* @lock: mutex to protect measurement values
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 6ea99cd6ae79..370b57dafab7 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -732,6 +732,13 @@ static int sht3x_probe(struct i2c_client *client,
mutex_init(&data->i2c_lock);
mutex_init(&data->data_lock);
+ /*
+	 * An attempt to read the limits register too early
+	 * causes a NACK response from the chip.
+	 * Waiting for an empirically determined delay of 500 us solves the issue.
+ */
+ usleep_range(500, 600);
+
ret = limits_update(data);
if (ret)
return ret;
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
new file mode 100644
index 000000000000..e858093ac806
--- /dev/null
+++ b/drivers/hwmon/w83773g.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Driver for the Nuvoton W83773G SMBus temperature sensor IC.
+ * Supported models: W83773G
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/* W83773 has 3 channels */
+#define W83773_CHANNELS 3
+
+/* The W83773 registers */
+#define W83773_CONVERSION_RATE_REG_READ 0x04
+#define W83773_CONVERSION_RATE_REG_WRITE 0x0A
+#define W83773_MANUFACTURER_ID_REG 0xFE
+#define W83773_LOCAL_TEMP 0x00
+
+static const u8 W83773_STATUS[2] = { 0x02, 0x17 };
+
+static const u8 W83773_TEMP_LSB[2] = { 0x10, 0x25 };
+static const u8 W83773_TEMP_MSB[2] = { 0x01, 0x24 };
+
+static const u8 W83773_OFFSET_LSB[2] = { 0x12, 0x16 };
+static const u8 W83773_OFFSET_MSB[2] = { 0x11, 0x15 };
+
+/* I2C device IDs supported by this driver */
+static const struct i2c_device_id w83773_id[] = {
+ { "w83773g" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, w83773_id);
+
+static const struct of_device_id w83773_of_match[] = {
+ {
+ .compatible = "nuvoton,w83773g"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, w83773_of_match);
+
+static inline long temp_of_local(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline long temp_of_remote(s8 hb, u8 lb)
+{
+ return (hb << 3 | lb >> 5) * 125;
+}
+
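The remote channels report 11 significant bits: eight MSBs plus three LSBs held in bits 7:5 of the LSB register, at 0.125 degC per step (hence the factor of 125 in milli-degrees). A standalone worked example of the decode:

    #include <assert.h>

    /* Mirrors temp_of_remote(); hb is the driver's s8, lb its u8. */
    static long decode_remote(signed char hb, unsigned char lb)
    {
            return (hb << 3 | lb >> 5) * 125;
    }

    int main(void)
    {
            assert(decode_remote(0x19, 0x20) == 25125); /* 25.125 degC */
            assert(decode_remote(-1, 0xE0) == -125);    /* -0.125 degC */
            return 0;
    }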
+static int get_local_temp(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_LOCAL_TEMP, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_local(regval);
+ return 0;
+}
+
+static int get_remote_temp(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int get_fault(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_STATUS[index], &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = (regval & 0x04) >> 2;
+ return 0;
+}
+
+static int get_offset(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int set_offset(struct regmap *regmap, int index, long val)
+{
+ int ret;
+ u8 high_byte;
+ u8 low_byte;
+
+ val = clamp_val(val, -127825, 127825);
+ /* offset value equals to (high_byte << 3 | low_byte >> 5) * 125 */
+ val /= 125;
+ high_byte = val >> 3;
+ low_byte = (val & 0x07) << 5;
+
+ ret = regmap_write(regmap, W83773_OFFSET_MSB[index], high_byte);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, W83773_OFFSET_LSB[index], low_byte);
+}
+
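Since the offset uses the same 11-bit, 125 m-degC-per-step encoding as the remote temperature, encoding and decoding round-trip cleanly; a standalone sketch of one case:

    #include <assert.h>

    int main(void)
    {
            long val = 1000;                        /* 1 degC in milli-degrees */
            unsigned char high_byte, low_byte;

            val /= 125;                             /* 125 m-degC per step */
            high_byte = val >> 3;                   /* 8 MSBs */
            low_byte = (val & 0x07) << 5;           /* 3 LSBs in bits 7:5 */
            assert(high_byte == 1 && low_byte == 0);

            /* temp_of_remote()-style decode recovers the value */
            assert((high_byte << 3 | low_byte >> 5) * 125 == 1000);
            return 0;
    }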
+static int get_update_interval(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_CONVERSION_RATE_REG_READ, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = 16000 >> regval;
+ return 0;
+}
+
+static int set_update_interval(struct regmap *regmap, long val)
+{
+ int rate;
+
+ /*
+ * For valid rates, interval can be calculated as
+ * interval = (1 << (8 - rate)) * 62.5;
+ * Rounded rate is therefore
+ * rate = 8 - __fls(interval * 8 / (62.5 * 7));
+ * Use clamp_val() to avoid overflows, and to ensure valid input
+ * for __fls.
+ */
+ val = clamp_val(val, 62, 16000) * 10;
+ rate = 8 - __fls((val * 8 / (625 * 7)));
+ return regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, rate);
+}
+
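Following the comment above, a worked example: a request of 500 ms clamps and scales to val = 5000, 5000 * 8 / 4375 = 9, __fls(9) = 3, hence rate = 5; get_update_interval() maps rate 5 back to 16000 >> 5 = 500 ms, which is the 2 Hz default written in probe. A freestanding sketch using a portable stand-in for the kernel's __fls():

    #include <assert.h>

    /* Portable stand-in for the kernel's __fls(): index of highest set bit. */
    static int fls_index(unsigned long v)
    {
            int i = -1;

            while (v) {
                    v >>= 1;
                    i++;
            }
            return i;
    }

    static int interval_to_rate(long ms)
    {
            if (ms < 62)
                    ms = 62;
            if (ms > 16000)
                    ms = 16000;
            return 8 - fls_index((ms * 10) * 8 / (625 * 7));
    }

    int main(void)
    {
            assert(interval_to_rate(500) == 5);             /* 2 Hz */
            assert((16000 >> interval_to_rate(500)) == 500);
            return 0;
    }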
+static int w83773_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval)
+ return get_update_interval(regmap, val);
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (channel == 0)
+ return get_local_temp(regmap, val);
+ return get_remote_temp(regmap, channel - 1, val);
+ case hwmon_temp_fault:
+ return get_fault(regmap, channel - 1, val);
+ case hwmon_temp_offset:
+ return get_offset(regmap, channel - 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int w83773_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return set_update_interval(regmap, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return set_offset(regmap, channel - 1, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_offset:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const u32 w83773_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_chip = {
+ .type = hwmon_chip,
+ .config = w83773_chip_config,
+};
+
+static const u32 w83773_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_temp = {
+ .type = hwmon_temp,
+ .config = w83773_temp_config,
+};
+
+static const struct hwmon_channel_info *w83773_info[] = {
+ &w83773_chip,
+ &w83773_temp,
+ NULL
+};
+
+static const struct hwmon_ops w83773_ops = {
+ .is_visible = w83773_is_visible,
+ .read = w83773_read,
+ .write = w83773_write,
+};
+
+static const struct hwmon_chip_info w83773_chip_info = {
+ .ops = &w83773_ops,
+ .info = w83773_info,
+};
+
+static const struct regmap_config w83773_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int w83773_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &w83773_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Set the conversion rate to 2 Hz */
+ ret = regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, 0x05);
+ if (ret < 0) {
+ dev_err(&client->dev, "error writing config rate register\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ regmap,
+ &w83773_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver w83773_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "w83773g",
+ .of_match_table = of_match_ptr(w83773_of_match),
+ },
+ .probe = w83773_probe,
+ .id_table = w83773_id,
+};
+
+module_i2c_driver(w83773_driver);
+
+MODULE_AUTHOR("Lei YU <mine260309@gmail.com>");
+MODULE_DESCRIPTION("W83773G temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index a18794128bf8..7c375443ede6 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -104,26 +104,17 @@ static int of_coresight_alloc_memory(struct device *dev,
int of_coresight_get_cpu(const struct device_node *node)
{
int cpu;
- bool found;
- struct device_node *dn, *np;
+ struct device_node *dn;
dn = of_parse_phandle(node, "cpu", 0);
-
/* Affinity defaults to CPU0 */
if (!dn)
return 0;
-
- for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
- found = (dn == np);
- of_node_put(np);
- if (found)
- break;
- }
+ cpu = of_cpu_node_to_id(dn);
of_node_put(dn);
/* Affinity to CPU0 if no cpu nodes are found */
- return found ? cpu : 0;
+ return (cpu < 0) ? 0 : cpu;
}
EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 21bf619a86c5..9fee4c054d3d 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -280,8 +280,6 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_disabled;
- bool suspended;
- bool skip_resume;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 58add69a441c..153b947702c5 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -42,6 +42,7 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include "i2c-designware-core.h"
@@ -372,6 +373,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
@@ -435,12 +441,24 @@ MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_prepare(struct device *dev)
{
- return pm_runtime_suspended(dev);
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
}
static void dw_i2c_plat_complete(struct device *dev)
{
- if (dev->power.direct_complete)
+ /*
+ * The device can only be in runtime suspend at this point if it has not
+ * been resumed throughout the ending system suspend/resume cycle, so if
+	 * the platform firmware might have messed with it, request the runtime PM
+ * framework to resume it.
+ */
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
#else
@@ -453,16 +471,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (i_dev->suspended) {
- i_dev->skip_resume = true;
- return 0;
- }
-
i_dev->disable(i_dev);
i2c_dw_plat_prepare_clk(i_dev, false);
- i_dev->suspended = true;
-
return 0;
}
@@ -470,19 +481,9 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (!i_dev->suspended)
- return 0;
-
- if (i_dev->skip_resume) {
- i_dev->skip_resume = false;
- return 0;
- }
-
i2c_dw_plat_prepare_clk(i_dev, true);
i_dev->init(i_dev);
- i_dev->suspended = false;
-
return 0;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ef86296b8b0d..39e3b345a6c8 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -629,6 +629,18 @@ config SPEAR_ADC
To compile this driver as a module, choose M here: the
module will be called spear_adc.
+config SD_ADC_MODULATOR
+ tristate "Generic sigma delta modulator"
+ depends on OF
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Select this option to enables sigma delta modulator. This driver can
+ support generic sigma delta modulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called sd_adc_modulator.
+
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
@@ -656,6 +668,31 @@ config STM32_ADC
This driver can also be built as a module. If so, the module
will be called stm32-adc.
+config STM32_DFSDM_CORE
+ tristate "STMicroelectronics STM32 DFSDM core"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ help
+	  Select this option to enable the driver for the STMicroelectronics
+	  STM32 digital filter for sigma delta converter (DFSDM).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-core.
+
+config STM32_DFSDM_ADC
+ tristate "STMicroelectronics STM32 dfsdm adc"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select STM32_DFSDM_CORE
+ select REGMAP_MMIO
+ select IIO_BUFFER_HW_CONSUMER
+ help
+	  Select this option to support ADCs based on sigma delta modulators
+	  connected to the STMicroelectronics STM32 digital filter for sigma
+	  delta converter (DFSDM).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on PC104 && X86 && ISA_BUS_API
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 9572c1090f35..28a9423997f3 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
+obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
+obj-$(CONFIG_STM32_DFSDM_ADC) += stm32-dfsdm-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
@@ -82,3 +84,4 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
new file mode 100644
index 000000000000..560d8c7d9d86
--- /dev/null
+++ b/drivers/iio/adc/sd_adc_modulator.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic sigma delta modulator driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+static const struct iio_info iio_sd_mod_iio_info;
+
+static const struct iio_chan_spec iio_sd_mod_ch = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 1,
+ .shift = 0,
+ },
+};
+
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *iio;
+
+ iio = devm_iio_device_alloc(dev, 0);
+ if (!iio)
+ return -ENOMEM;
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = dev->of_node;
+ iio->name = dev_name(dev);
+ iio->info = &iio_sd_mod_iio_info;
+ iio->modes = INDIO_BUFFER_HARDWARE;
+
+ iio->num_channels = 1;
+ iio->channels = &iio_sd_mod_ch;
+
+ platform_set_drvdata(pdev, iio);
+
+ return devm_iio_device_register(&pdev->dev, iio);
+}
+
+static const struct of_device_id sd_adc_of_match[] = {
+ { .compatible = "sd-modulator" },
+ { .compatible = "ads1201" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sd_adc_of_match);
+
+static struct platform_driver iio_sd_mod_adc = {
+ .driver = {
+ .name = "iio_sd_adc_mod",
+ .of_match_table = of_match_ptr(sd_adc_of_match),
+ },
+ .probe = iio_sd_mod_probe,
+};
+
+module_platform_driver(iio_sd_mod_adc);
+
+MODULE_DESCRIPTION("Basic sigma delta modulator");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
new file mode 100644
index 000000000000..daa026d6a94f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the ADC part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+#define DFSDM_DMA_BUFFER_SIZE (4 * PAGE_SIZE)
+
+/* Conversion timeout */
+#define DFSDM_TIMEOUT_US 100000
+#define DFSDM_TIMEOUT (msecs_to_jiffies(DFSDM_TIMEOUT_US / 1000))
+
+/* Oversampling attribute default */
+#define DFSDM_DEFAULT_OVERSAMPLING 100
+
+/* Oversampling max values */
+#define DFSDM_MAX_INT_OVERSAMPLING 256
+#define DFSDM_MAX_FL_OVERSAMPLING 1024
+
+/* Max sample resolutions */
+#define DFSDM_MAX_RES BIT(31)
+#define DFSDM_DATA_RES BIT(23)
+
+enum sd_converter_type {
+ DFSDM_AUDIO,
+ DFSDM_IIO,
+};
+
+struct stm32_dfsdm_dev_data {
+ int type;
+ int (*init)(struct iio_dev *indio_dev);
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+struct stm32_dfsdm_adc {
+ struct stm32_dfsdm *dfsdm;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ unsigned int fl_id;
+ unsigned int ch_id;
+
+ /* ADC specific */
+ unsigned int oversamp;
+ struct iio_hw_consumer *hwc;
+ struct completion completion;
+ u32 *buffer;
+
+ /* Audio specific */
+ unsigned int spi_freq; /* SPI bus clock frequency */
+ unsigned int sample_freq; /* Sample frequency after filter decimation */
+ int (*cb)(const void *data, size_t size, void *cb_priv);
+ void *cb_priv;
+
+ /* DMA */
+ u8 *rx_buf;
+ unsigned int bufi; /* Buffer current position */
+ unsigned int buf_sz; /* Buffer size */
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_buf;
+};
+
+struct stm32_dfsdm_str2field {
+ const char *name;
+ unsigned int val;
+};
+
+/* DFSDM channel serial interface type */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_type[] = {
+ { "SPI_R", 0 }, /* SPI with data on rising edge */
+ { "SPI_F", 1 }, /* SPI with data on falling edge */
+ { "MANCH_R", 2 }, /* Manchester codec, rising edge = logic 0 */
+ { "MANCH_F", 3 }, /* Manchester codec, falling edge = logic 1 */
+ {},
+};
+
+/* DFSDM channel clock source */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_src[] = {
+ /* External SPI clock (CLKIN x) */
+ { "CLKIN", DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL },
+ /* Internal SPI clock (CLKOUT) */
+ { "CLKOUT", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL },
+ /* Internal SPI clock divided by 2 (falling edge) */
+ { "CLKOUT_F", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING },
+ /* Internal SPI clock divided by 2 (rising edge) */
+ { "CLKOUT_R", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING },
+ {},
+};
+
+static int stm32_dfsdm_str2val(const char *str,
+ const struct stm32_dfsdm_str2field *list)
+{
+ const struct stm32_dfsdm_str2field *p = list;
+
+ for (p = list; p && p->name; p++)
+ if (!strcmp(p->name, str))
+ return p->val;
+
+ return -EINVAL;
+}
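+
+/*
+ * For instance (illustrative values, taken from the tables above):
+ * stm32_dfsdm_str2val("SPI_F", stm32_dfsdm_chan_type) returns 1, while an
+ * unknown string such as "SPI_X" returns -EINVAL.
+ */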
+
+static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ unsigned int fast, unsigned int oversamp)
+{
+ unsigned int i, d, fosr, iosr;
+ u64 res;
+ s64 delta;
+ unsigned int m = 1; /* multiplication factor */
+ unsigned int p = fl->ford; /* filter order (ford) */
+
+ pr_debug("%s: Requested oversampling: %d\n", __func__, oversamp);
+ /*
+ * This function tries to compute the filter oversampling and integrator
+ * oversampling ratios, based on the oversampling ratio requested by the
+ * user.
+ *
+ * The decimation d depends on the filter order and the oversampling
+ * ratios:
+ * ford: filter order
+ * fosr: filter oversampling ratio
+ * iosr: integrator oversampling ratio
+ */
+ if (fl->ford == DFSDM_FASTSINC_ORDER) {
+ m = 2;
+ p = 2;
+ }
+
+ /*
+ * Look for filter and integrator oversampling ratios which allow
+ * reaching a 24-bit data output resolution.
+ * Return as soon as the exact resolution is reached.
+ * Otherwise the highest resolution below 32 bits is kept.
+ */
+ for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+ for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+ if (fast)
+ d = fosr * iosr;
+ else if (fl->ford == DFSDM_FASTSINC_ORDER)
+ d = fosr * (iosr + 3) + 2;
+ else
+ d = fosr * (iosr - 1 + p) + p;
+
+ if (d > oversamp)
+ break;
+ else if (d != oversamp)
+ continue;
+ /*
+ * Check the resolution (limited to signed 32 bits):
+ * res <= 2^31
+ * Sincx filters:
+ * res = m * fosr^p * iosr (with m = 1, p = ford)
+ * FastSinc filter:
+ * res = m * fosr^p * iosr (with m = 2, p = 2)
+ */
+ res = fosr;
+ for (i = p - 1; i > 0; i--) {
+ res = res * (u64)fosr;
+ if (res > DFSDM_MAX_RES)
+ break;
+ }
+ if (res > DFSDM_MAX_RES)
+ continue;
+ res = res * (u64)m * (u64)iosr;
+ if (res > DFSDM_MAX_RES)
+ continue;
+
+ delta = res - DFSDM_DATA_RES;
+
+ if (res >= fl->res) {
+ fl->res = res;
+ fl->fosr = fosr;
+ fl->iosr = iosr;
+ fl->fast = fast;
+ pr_debug("%s: fosr = %d, iosr = %d\n",
+ __func__, fl->fosr, fl->iosr);
+ }
+
+ if (!delta)
+ return 0;
+ }
+ }
+
+ if (!fl->fosr)
+ return -EINVAL;
+
+ return 0;
+}
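+
+/*
+ * Worked example of the search above (illustrative figures, not from a
+ * datasheet): for spi_freq = 2.048 MHz and sample_freq = 16 kHz the
+ * requested ratio is oversamp = 128. With a Sinc3 filter (p = 3, m = 1)
+ * and fast = 0, d = fosr * (iosr - 1 + p) + p matches for, among others:
+ * fosr = 5, iosr = 23: d = 5 * 25 + 3 = 128, res = 5^3 * 23 = 2875
+ * fosr = 25, iosr = 3: d = 25 * 5 + 3 = 128, res = 25^3 * 3 = 46875
+ * The candidate with the highest resolution wins, so fosr = 25 and
+ * iosr = 3 are retained.
+ */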
+
+static int stm32_dfsdm_start_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK,
+ DFSDM_CHCFGR1_CHEN(1));
+}
+
+static void stm32_dfsdm_stop_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK, DFSDM_CHCFGR1_CHEN(0));
+}
+
+static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
+ struct stm32_dfsdm_channel *ch)
+{
+ unsigned int id = ch->id;
+ struct regmap *regmap = dfsdm->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SITP_MASK,
+ DFSDM_CHCFGR1_SITP(ch->type));
+ if (ret < 0)
+ return ret;
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SPICKSEL_MASK,
+ DFSDM_CHCFGR1_SPICKSEL(ch->src));
+ if (ret < 0)
+ return ret;
+ return regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_CHINSEL_MASK,
+ DFSDM_CHCFGR1_CHINSEL(ch->alt_si));
+}
+
+static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id)
+{
+ int ret;
+
+ /* Enable filter */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(1));
+ if (ret < 0)
+ return ret;
+
+ /* Start conversion */
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSWSTART_MASK,
+ DFSDM_CR1_RSWSTART(1));
+}
+
+static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm, unsigned int fl_id)
+{
+ /* Disable conversion */
+ regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(0));
+}
+
+static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id, unsigned int ch_id)
+{
+ struct regmap *regmap = dfsdm->regmap;
+ struct stm32_dfsdm_filter *fl = &dfsdm->fl_list[fl_id];
+ int ret;
+
+ /* Average integrator oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+ DFSDM_FCR_IOSR(fl->iosr - 1));
+ if (ret)
+ return ret;
+
+ /* Filter order and Oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+ DFSDM_FCR_FOSR(fl->fosr - 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FORD_MASK,
+ DFSDM_FCR_FORD(fl->ford));
+ if (ret)
+ return ret;
+
+ /* No scan mode supported for the moment */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_RCH_MASK,
+ DFSDM_CR1_RCH(ch_id));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSYNC_MASK,
+ DFSDM_CR1_RSYNC(fl->sync_mode));
+}
+
+static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_channel *df_ch;
+ const char *of_str;
+ int chan_idx = ch->scan_index;
+ int ret, val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-channels", chan_idx,
+ &ch->channel);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channels' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+ if (ch->channel >= dfsdm->num_chs) {
+ dev_err(&indio_dev->dev,
+ " Error bad channel number %d (max = %d)\n",
+ ch->channel, dfsdm->num_chs);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-names", chan_idx,
+ &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channel-names' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+
+ df_ch = &dfsdm->ch_list[ch->channel];
+ df_ch->id = ch->channel;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-types", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->type = val;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-clk-src", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->src = val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-alt-channel", chan_idx,
+ &df_ch->alt_si);
+ if (ret < 0)
+ df_ch->alt_si = 0;
+
+ return 0;
+}
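+
+/*
+ * The properties parsed above map onto a device tree description along the
+ * lines of the following sketch (hypothetical node, see the DT bindings for
+ * the authoritative format):
+ *
+ *	dfsdm0: filter@0 {
+ *		compatible = "st,stm32-dfsdm-adc";
+ *		reg = <0>;
+ *		st,adc-channels = <0>;
+ *		st,adc-channel-names = "sd_in0";
+ *		st,adc-channel-types = "SPI_R";
+ *		st,adc-channel-clk-src = "CLKOUT";
+ *	};
+ */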
+
+static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", adc->spi_freq);
+}
+
+static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int sample_freq = adc->sample_freq;
+ unsigned int spi_freq;
+ int ret;
+
+ /* If DFSDM is master on SPI, SPI freq cannot be updated */
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ return -EPERM;
+
+ ret = kstrtouint(buf, 0, &spi_freq);
+ if (ret)
+ return ret;
+
+ if (!spi_freq)
+ return -EINVAL;
+
+ if (sample_freq) {
+ if (spi_freq % sample_freq)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / sample_freq));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / sample_freq));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "No filter parameters that match!\n");
+ return ret;
+ }
+ }
+ adc->spi_freq = spi_freq;
+
+ return len;
+}
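+
+/*
+ * The two handlers above back the "spi_clk_freq" extended channel attribute
+ * declared further down. Through IIO sysfs this is expected to show up as a
+ * per-type file (name derivation assumed), e.g.:
+ *
+ *	echo 2048000 > /sys/bus/iio/devices/iio:deviceX/in_voltage_spi_clk_freq
+ */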
+
+static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+ int ret;
+ unsigned int dma_en = 0, cont_en = 0;
+
+ ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
+ adc->ch_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ if (dma) {
+ /* Enable DMA transfer */
+ dma_en = DFSDM_CR1_RDMAEN(1);
+ /* Enable conversion triggered by SPI clock */
+ cont_en = DFSDM_CR1_RCONT(1);
+ }
+ /* Apply the DMA transfer setting */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, dma_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ /* Apply the continuous conversion setting */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, cont_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ ret = stm32_dfsdm_start_filter(adc->dfsdm, adc->fl_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ return 0;
+
+stop_channels:
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+
+ return ret;
+}
+
+static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+
+ stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
+
+ /* Clean conversion options */
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+}
+
+static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
+ unsigned int val)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ unsigned int watermark = DFSDM_DMA_BUFFER_SIZE / 2;
+
+ /*
+ * DMA cyclic transfers are used, the buffer is split into two periods.
+ * There should be:
+ * - always one buffer (period) that DMA is working on
+ * - one buffer (period) that the driver has pushed to the ASoC side.
+ */
+ watermark = min(watermark, val * (unsigned int)(sizeof(u32)));
+ adc->buf_sz = watermark * 2;
+
+ return 0;
+}
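+
+/*
+ * Example (assuming 4 KiB pages, i.e. DFSDM_DMA_BUFFER_SIZE = 16384): a
+ * watermark request of 1024 samples gives 1024 * sizeof(u32) = 4096 bytes,
+ * below the 8192-byte cap, so buf_sz = 8192: two DMA periods of 4 KiB.
+ */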
+
+static unsigned int stm32_dfsdm_adc_dma_residue(struct stm32_dfsdm_adc *adc)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(adc->dma_chan,
+ adc->dma_chan->cookie,
+ &state);
+ if (status == DMA_IN_PROGRESS) {
+ /* Residue is size in bytes from end of buffer */
+ unsigned int i = adc->buf_sz - state.residue;
+ unsigned int size;
+
+ /* Return available bytes */
+ if (i >= adc->bufi)
+ size = i - adc->bufi;
+ else
+ size = adc->buf_sz + i - adc->bufi;
+
+ return size;
+ }
+
+ return 0;
+}
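+
+/*
+ * Wrap-around example for the arithmetic above (illustrative values): with
+ * buf_sz = 8192 and state.residue = 1000, DMA has filled the buffer up to
+ * i = 8192 - 1000 = 7192. With bufi = 7000, 192 bytes are available; with
+ * bufi = 7500 the position has wrapped, giving 8192 + 7192 - 7500 = 7884.
+ */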
+
+static void stm32_dfsdm_audio_dma_buffer_done(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int available = stm32_dfsdm_adc_dma_residue(adc);
+ size_t old_pos;
+
+ /*
+ * FIXME: The IIO kernel interface does not support cyclic DMA buffers
+ * and only offers an interface to push data sample by sample.
+ * For this reason the IIO buffer interface is not used here: it is
+ * bypassed with a private callback registered by the ASoC driver.
+ * This is a temporary solution until cyclic DMA engine support is
+ * added to IIO.
+ */
+
+ dev_dbg(&indio_dev->dev, "%s: pos = %d, available = %d\n", __func__,
+ adc->bufi, available);
+ old_pos = adc->bufi;
+
+ while (available >= indio_dev->scan_bytes) {
+ u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
+
+ /* Mask out the 8 LSBs that contain the channel ID */
+ *buffer &= 0xFFFFFF00;
+ available -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->buf_sz) {
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos],
+ adc->buf_sz - old_pos, adc->cb_priv);
+ adc->bufi = 0;
+ old_pos = 0;
+ }
+ }
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
+ adc->cb_priv);
+}
+
+static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
+ adc->buf_sz, adc->buf_sz / 2);
+
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
+ adc->dma_buf,
+ adc->buf_sz, adc->buf_sz / 2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = stm32_dfsdm_audio_dma_buffer_done;
+ desc->callback_param = indio_dev;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_all(adc->dma_chan);
+ return ret;
+ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(adc->dma_chan);
+
+ return 0;
+}
+
+static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ /* Reset adc buffer index */
+ adc->bufi = 0;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_start_conv(adc, true);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start conversion\n");
+ goto stop_dfsdm;
+ }
+
+ if (adc->dma_chan) {
+ ret = stm32_dfsdm_adc_dma_start(indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start DMA\n");
+ goto err_stop_conv;
+ }
+ }
+
+ return 0;
+
+err_stop_conv:
+ stm32_dfsdm_stop_conv(adc);
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan)
+ dmaengine_terminate_all(adc->dma_chan);
+
+ stm32_dfsdm_stop_conv(adc);
+
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops stm32_dfsdm_buffer_setup_ops = {
+ .postenable = &stm32_dfsdm_postenable,
+ .predisable = &stm32_dfsdm_predisable,
+};
+
+/**
+ * stm32_dfsdm_get_buff_cb() - register a callback that will be called when
+ * a DMA transfer period is completed.
+ *
+ * @iio_dev: Handle to IIO device.
+ * @cb: Pointer to callback function:
+ * - data: pointer to data buffer
+ * - size: size in bytes of the data buffer
+ * - private: pointer to consumer private structure.
+ * @private: Pointer to consumer private structure.
+ */
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = cb;
+ adc->cb_priv = private;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_get_buff_cb);
+
+/**
+ * stm32_dfsdm_release_buff_cb - unregister buffer callback
+ *
+ * @iio_dev: Handle to IIO device.
+ */
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = NULL;
+ adc->cb_priv = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_release_buff_cb);
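+
+/*
+ * Sketch of the expected consumer-side use of the two helpers above from an
+ * ASoC driver (names hypothetical):
+ *
+ *	static int dfsdm_pcm_cb(const void *data, size_t size, void *priv)
+ *	{
+ *		// push 'size' bytes of samples to the ALSA ring buffer
+ *		return 0;
+ *	}
+ *
+ *	stm32_dfsdm_get_buff_cb(iio_dev, dfsdm_pcm_cb, pcm_priv);
+ *	// ... capture stream running ...
+ *	stm32_dfsdm_release_buff_cb(iio_dev);
+ */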
+
+static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *res)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = res;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(1));
+ if (ret < 0)
+ goto stop_dfsdm;
+
+ ret = stm32_dfsdm_start_conv(adc, false);
+ if (ret < 0) {
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+ goto stop_dfsdm;
+ }
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ DFSDM_TIMEOUT);
+
+ /* Mask the end-of-conversion IRQ for regular conversions */
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else if (timeout < 0)
+ ret = timeout;
+ else
+ ret = IIO_VAL_INT;
+
+ stm32_dfsdm_stop_conv(adc);
+
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int spi_freq = adc->spi_freq;
+ int ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = stm32_dfsdm_set_osrs(fl, 0, val);
+ if (!ret)
+ adc->oversamp = val;
+
+ return ret;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ spi_freq = adc->dfsdm->spi_master_freq;
+
+ if (spi_freq % val)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / val));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / val));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Not able to find parameter that match!\n");
+ return ret;
+ }
+ adc->sample_freq = val;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: IIO enable failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+ iio_hw_consumer_disable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: Conversion failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = adc->oversamp;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adc->sample_freq;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info stm32_dfsdm_info_audio = {
+ .hwfifo_set_watermark = stm32_dfsdm_set_watermark,
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static const struct iio_info stm32_dfsdm_info_adc = {
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
+{
+ struct stm32_dfsdm_adc *adc = arg;
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ unsigned int status, int_en;
+
+ regmap_read(regmap, DFSDM_ISR(adc->fl_id), &status);
+ regmap_read(regmap, DFSDM_CR2(adc->fl_id), &int_en);
+
+ if (status & DFSDM_ISR_REOCF_MASK) {
+ /* Reading the data register also clears the IRQ status */
+ regmap_read(regmap, DFSDM_RDATAR(adc->fl_id), adc->buffer);
+ complete(&adc->completion);
+ }
+
+ if (status & DFSDM_ISR_ROVRF_MASK) {
+ if (int_en & DFSDM_CR2_ROVRIE_MASK)
+ dev_warn(&indio_dev->dev, "Overrun detected\n");
+ regmap_update_bits(regmap, DFSDM_ICR(adc->fl_id),
+ DFSDM_ICR_CLRROVRF_MASK,
+ DFSDM_ICR_CLRROVRF_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define external info for the SPI frequency and the audio sampling rate,
+ * which can be configured by the ASoC driver through the consumer.h API
+ */
+static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
+ /* spi_clk_freq: clock frequency on the SPI/Manchester bus used by the channel */
+ {
+ .name = "spi_clk_freq",
+ .shared = IIO_SHARED_BY_TYPE,
+ .read = dfsdm_adc_audio_get_spiclk,
+ .write = dfsdm_adc_audio_set_spiclk,
+ },
+ {},
+};
+
+static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan) {
+ dma_free_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+ dma_release_channel(adc->dma_chan);
+ }
+}
+
+static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_slave_config config = {
+ .src_addr = (dma_addr_t)adc->dfsdm->phys_base +
+ DFSDM_RDATAR(adc->fl_id),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ int ret;
+
+ adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ &adc->dma_buf, GFP_KERNEL);
+ if (!adc->rx_buf) {
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ ret = dmaengine_slave_config(adc->dma_chan, &config);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dma_free_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+err_release:
+ dma_release_channel(adc->dma_chan);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+ if (ret < 0)
+ return ret;
+
+ ch->type = IIO_VOLTAGE;
+ ch->indexed = 1;
+
+ /*
+ * IIO_CHAN_INFO_RAW: used to compute regular conversion
+ * IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
+ */
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+
+ if (adc->dev_data->type == DFSDM_AUDIO) {
+ ch->scan_type.sign = 's';
+ ch->ext_info = dfsdm_adc_audio_ext_info;
+ } else {
+ ch->scan_type.sign = 'u';
+ }
+ ch->scan_type.realbits = 24;
+ ch->scan_type.storagebits = 32;
+ adc->ch_id = ch->channel;
+
+ return stm32_dfsdm_chan_configure(adc->dfsdm,
+ &adc->dfsdm->ch_list[ch->channel]);
+}
+
+static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_channel *d_ch;
+ int ret;
+
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+ indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
+
+ ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->scan_index = 0;
+
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ d_ch = &adc->dfsdm->ch_list[adc->ch_id];
+ if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ adc->spi_freq = adc->dfsdm->spi_master_freq;
+
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+ return stm32_dfsdm_dma_request(indio_dev);
+}
+
+static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int num_ch;
+ int ret, chan_idx;
+
+ adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+ ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+ adc->oversamp);
+ if (ret < 0)
+ return ret;
+
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
+ "st,adc-channels");
+ if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+ return num_ch < 0 ? num_ch : -EINVAL;
+ }
+
+ /* Bind to SD modulator IIO device */
+ adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+ if (IS_ERR(adc->hwc))
+ return -EPROBE_DEFER;
+
+ ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
+ GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+ ch->scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ }
+
+ indio_dev->num_channels = num_ch;
+ indio_dev->channels = ch;
+
+ init_completion(&adc->completion);
+
+ return 0;
+}
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_adc_data = {
+ .type = DFSDM_IIO,
+ .init = stm32_dfsdm_adc_init,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_audio_data = {
+ .type = DFSDM_AUDIO,
+ .init = stm32_dfsdm_audio_init,
+};
+
+static const struct of_device_id stm32_dfsdm_adc_match[] = {
+ {
+ .compatible = "st,stm32-dfsdm-adc",
+ .data = &stm32h7_dfsdm_adc_data,
+ },
+ {
+ .compatible = "st,stm32-dfsdm-dmic",
+ .data = &stm32h7_dfsdm_audio_data,
+ },
+ {}
+};
+
+static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_dfsdm_adc *adc;
+ struct device_node *np = dev->of_node;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct iio_dev *iio;
+ char *name;
+ int ret, irq, val;
+
+ dev_data = of_device_get_match_data(dev);
+ iio = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!iio) {
+ dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(iio);
+ adc->dfsdm = dev_get_drvdata(dev->parent);
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = np;
+ iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(dev->of_node, "reg", &adc->fl_id);
+ if (ret != 0) {
+ dev_err(dev, "Missing reg property\n");
+ return -EINVAL;
+ }
+
+ name = devm_kzalloc(dev, sizeof("dfsdm-adc0"), GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ if (dev_data->type == DFSDM_AUDIO) {
+ iio->info = &stm32_dfsdm_info_audio;
+ snprintf(name, sizeof("dfsdm-pdm0"), "dfsdm-pdm%d", adc->fl_id);
+ } else {
+ iio->info = &stm32_dfsdm_info_adc;
+ snprintf(name, sizeof("dfsdm-adc0"), "dfsdm-adc%d", adc->fl_id);
+ }
+ iio->name = name;
+
+ /*
+ * As a first step, IRQs generated by the channels are not handled,
+ * so the IRQ associated with filter instance 0 is dedicated to filter 0.
+ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+ 0, pdev->name, adc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "st,filter-order", &val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set filter order\n");
+ return ret;
+ }
+
+ adc->dfsdm->fl_list[adc->fl_id].ford = val;
+
+ ret = of_property_read_u32(dev->of_node, "st,filter0-sync", &val);
+ if (!ret)
+ adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
+
+ adc->dev_data = dev_data;
+ ret = dev_data->init(iio);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(iio);
+ if (ret < 0)
+ goto err_cleanup;
+
+ dev_err(dev, "of_platform_populate\n");
+ if (dev_data->type == DFSDM_AUDIO) {
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find an audio DAI\n");
+ goto err_unregister;
+ }
+ }
+
+ return 0;
+
+err_unregister:
+ iio_device_unregister(iio);
+err_cleanup:
+ stm32_dfsdm_dma_release(iio);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_dfsdm_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ if (adc->dev_data->type == DFSDM_AUDIO)
+ of_platform_depopulate(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ stm32_dfsdm_dma_release(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm32_dfsdm_adc_driver = {
+ .driver = {
+ .name = "stm32-dfsdm-adc",
+ .of_match_table = stm32_dfsdm_adc_match,
+ },
+ .probe = stm32_dfsdm_adc_probe,
+ .remove = stm32_dfsdm_adc_remove,
+};
+module_platform_driver(stm32_dfsdm_adc_driver);
+
+MODULE_DESCRIPTION("STM32 sigma delta ADC");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
new file mode 100644
index 000000000000..6290332cfd3f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the core part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+struct stm32_dfsdm_dev_data {
+ unsigned int num_filters;
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+#define STM32H7_DFSDM_NUM_FILTERS 4
+#define STM32H7_DFSDM_NUM_CHANNELS 8
+
+static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg < DFSDM_FILTER_BASE_ADR)
+ return false;
+
+ /*
+ * The mask is applied to the register offset to avoid listing the
+ * registers of all filter instances.
+ */
+ switch (reg & DFSDM_FILTER_REG_MASK) {
+ case DFSDM_CR1(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_JDATAR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_RDATAR(0) & DFSDM_FILTER_REG_MASK:
+ return true;
+ }
+
+ return false;
+}
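+
+/*
+ * Masking example: DFSDM_ISR(2) = 0x100 + 2 * 0x80 + 0x08 = 0x288, and
+ * 0x288 & DFSDM_FILTER_REG_MASK (0x7F) = 0x08 = DFSDM_ISR(0) & 0x7F, so the
+ * ISR of every filter instance hits the same case label above.
+ */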
+
+static const struct regmap_config stm32h7_dfsdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x2B8,
+ .volatile_reg = stm32_dfsdm_volatile_reg,
+ .fast_io = true,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_data = {
+ .num_filters = STM32H7_DFSDM_NUM_FILTERS,
+ .num_channels = STM32H7_DFSDM_NUM_CHANNELS,
+ .regmap_cfg = &stm32h7_dfsdm_regmap_cfg,
+};
+
+struct dfsdm_priv {
+ struct platform_device *pdev; /* platform device */
+
+ struct stm32_dfsdm dfsdm; /* common data exported for all instances */
+
+ unsigned int spi_clk_out_div; /* SPI clkout divider value */
+ atomic_t n_active_ch; /* number of current active channels */
+
+ struct clk *clk; /* DFSDM clock */
+ struct clk *aclk; /* audio clock */
+};
+
+/**
+ * stm32_dfsdm_start_dfsdm - start the global DFSDM interface.
+ * @dfsdm: Handle used to retrieve the dfsdm context.
+ *
+ * Enable the interface if n_active_ch is not null.
+ */
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ struct device *dev = &priv->pdev->dev;
+ unsigned int clk_div = priv->spi_clk_out_div;
+ int ret;
+
+ if (atomic_inc_return(&priv->n_active_ch) == 1) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start clock\n");
+ goto error_ret;
+ }
+ if (priv->aclk) {
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start audio clock\n");
+ goto disable_clk;
+ }
+ }
+
+ /* Output the SPI CLKOUT (if clk_div == 0 the clock is OFF) */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(clk_div));
+ if (ret < 0)
+ goto disable_aclk;
+
+ /* Global enable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(1));
+ if (ret < 0)
+ goto disable_aclk;
+ }
+
+ dev_dbg(dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+
+disable_aclk:
+ clk_disable_unprepare(priv->aclk);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+error_ret:
+ atomic_dec(&priv->n_active_ch);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_start_dfsdm);
+
+/**
+ * stm32_dfsdm_stop_dfsdm - stop the global DFSDM interface.
+ * @dfsdm: Handle used to retrieve the dfsdm context.
+ *
+ * Disable the interface if n_active_ch is null.
+ */
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ int ret;
+
+ if (atomic_dec_and_test(&priv->n_active_ch)) {
+ /* Global disable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(0));
+ if (ret < 0)
+ return ret;
+
+ /* Stop SPI CLKOUT */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(0));
+ if (ret < 0)
+ return ret;
+
+ clk_disable_unprepare(priv->clk);
+ if (priv->aclk)
+ clk_disable_unprepare(priv->aclk);
+ }
+ dev_dbg(&priv->pdev->dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_stop_dfsdm);
+
+static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ struct dfsdm_priv *priv)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+ unsigned long clk_freq;
+ unsigned int spi_freq, rem;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return -ENODEV;
+ }
+ priv->dfsdm.phys_base = res->start;
+ priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->dfsdm.base))
+ return PTR_ERR(priv->dfsdm.base);
+
+ /*
+ * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+ * "dfsdm" or "audio" clocks can be used as source clock for
+ * the SPI clock out signal and internal processing, depending
+ * on use case.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+ return -EINVAL;
+ }
+
+ priv->aclk = devm_clk_get(&pdev->dev, "audio");
+ if (IS_ERR(priv->aclk))
+ priv->aclk = NULL;
+
+ if (priv->aclk)
+ clk_freq = clk_get_rate(priv->aclk);
+ else
+ clk_freq = clk_get_rate(priv->clk);
+
+ /* SPI clock out frequency */
+ ret = of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &spi_freq);
+ if (ret < 0) {
+ /* No SPI master mode */
+ return 0;
+ }
+
+ priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
+ priv->dfsdm.spi_master_freq = spi_freq;
+
+ if (rem) {
+ dev_warn(&pdev->dev, "SPI clock not accurate\n");
+ dev_warn(&pdev->dev, "%ld = %d * %d + %d\n",
+ clk_freq, spi_freq, priv->spi_clk_out_div + 1, rem);
+ }
+
+ return 0;
+}
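+
+/*
+ * Divider example (hypothetical rates): with an "audio" clock at 49.152 MHz
+ * and spi-max-frequency = 2048000, div_u64_rem() returns 24 with rem = 0,
+ * so spi_clk_out_div = 23 and the hardware, dividing by CKOUTDIV + 1 = 24,
+ * outputs a 2.048 MHz CLKOUT.
+ */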
+
+static const struct of_device_id stm32_dfsdm_of_match[] = {
+ {
+ .compatible = "st,stm32h7-dfsdm",
+ .data = &stm32h7_dfsdm_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
+
+static int stm32_dfsdm_probe(struct platform_device *pdev)
+{
+ struct dfsdm_priv *priv;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct stm32_dfsdm *dfsdm;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+
+ dev_data = of_device_get_match_data(&pdev->dev);
+
+ dfsdm = &priv->dfsdm;
+ dfsdm->fl_list = devm_kcalloc(&pdev->dev, dev_data->num_filters,
+ sizeof(*dfsdm->fl_list), GFP_KERNEL);
+ if (!dfsdm->fl_list)
+ return -ENOMEM;
+
+ dfsdm->num_fls = dev_data->num_filters;
+ dfsdm->ch_list = devm_kcalloc(&pdev->dev, dev_data->num_channels,
+ sizeof(*dfsdm->ch_list),
+ GFP_KERNEL);
+ if (!dfsdm->ch_list)
+ return -ENOMEM;
+ dfsdm->num_chs = dev_data->num_channels;
+
+ ret = stm32_dfsdm_parse_of(pdev, priv);
+ if (ret < 0)
+ return ret;
+
+ dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
+ dfsdm->base,
+ &stm32h7_dfsdm_regmap_cfg);
+ if (IS_ERR(dfsdm->regmap)) {
+ ret = PTR_ERR(dfsdm->regmap);
+ dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dfsdm);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static struct platform_driver stm32_dfsdm_driver = {
+ .probe = stm32_dfsdm_probe,
+ .driver = {
+ .name = "stm32-dfsdm",
+ .of_match_table = stm32_dfsdm_of_match,
+ },
+};
+
+module_platform_driver(stm32_dfsdm_driver);
+
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 dfsdm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
new file mode 100644
index 000000000000..8708394b0725
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is part of STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef MDF_STM32_DFSDM__H
+#define MDF_STM32_DFSDM__H
+
+#include <linux/bitfield.h>
+
+/*
+ * STM32 DFSDM - global register map
+ * ________________________________________________________
+ * | Offset | Registers block |
+ * --------------------------------------------------------
+ * | 0x000 | CHANNEL 0 + COMMON CHANNEL FIELDS |
+ * --------------------------------------------------------
+ * | 0x020 | CHANNEL 1 |
+ * --------------------------------------------------------
+ * | ... | ..... |
+ * --------------------------------------------------------
+ * | 0x0E0 | CHANNEL 7 |
+ * --------------------------------------------------------
+ * | 0x100 | FILTER 0 + COMMON FILTER FIELDs |
+ * --------------------------------------------------------
+ * | 0x200 | FILTER 1 |
+ * --------------------------------------------------------
+ * | 0x300 | FILTER 2 |
+ * --------------------------------------------------------
+ * | 0x400 | FILTER 3 |
+ * --------------------------------------------------------
+ */
+
+/*
+ * Channels register definitions
+ */
+#define DFSDM_CHCFGR1(y) ((y) * 0x20 + 0x00)
+#define DFSDM_CHCFGR2(y) ((y) * 0x20 + 0x04)
+#define DFSDM_AWSCDR(y) ((y) * 0x20 + 0x08)
+#define DFSDM_CHWDATR(y) ((y) * 0x20 + 0x0C)
+#define DFSDM_CHDATINR(y) ((y) * 0x20 + 0x10)
+
+/* CHCFGR1: Channel configuration register 1 */
+#define DFSDM_CHCFGR1_SITP_MASK GENMASK(1, 0)
+#define DFSDM_CHCFGR1_SITP(v) FIELD_PREP(DFSDM_CHCFGR1_SITP_MASK, v)
+#define DFSDM_CHCFGR1_SPICKSEL_MASK GENMASK(3, 2)
+#define DFSDM_CHCFGR1_SPICKSEL(v) FIELD_PREP(DFSDM_CHCFGR1_SPICKSEL_MASK, v)
+#define DFSDM_CHCFGR1_SCDEN_MASK BIT(5)
+#define DFSDM_CHCFGR1_SCDEN(v) FIELD_PREP(DFSDM_CHCFGR1_SCDEN_MASK, v)
+#define DFSDM_CHCFGR1_CKABEN_MASK BIT(6)
+#define DFSDM_CHCFGR1_CKABEN(v) FIELD_PREP(DFSDM_CHCFGR1_CKABEN_MASK, v)
+#define DFSDM_CHCFGR1_CHEN_MASK BIT(7)
+#define DFSDM_CHCFGR1_CHEN(v) FIELD_PREP(DFSDM_CHCFGR1_CHEN_MASK, v)
+#define DFSDM_CHCFGR1_CHINSEL_MASK BIT(8)
+#define DFSDM_CHCFGR1_CHINSEL(v) FIELD_PREP(DFSDM_CHCFGR1_CHINSEL_MASK, v)
+#define DFSDM_CHCFGR1_DATMPX_MASK GENMASK(13, 12)
+#define DFSDM_CHCFGR1_DATMPX(v) FIELD_PREP(DFSDM_CHCFGR1_DATMPX_MASK, v)
+#define DFSDM_CHCFGR1_DATPACK_MASK GENMASK(15, 14)
+#define DFSDM_CHCFGR1_DATPACK(v) FIELD_PREP(DFSDM_CHCFGR1_DATPACK_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTDIV_MASK GENMASK(23, 16)
+#define DFSDM_CHCFGR1_CKOUTDIV(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTDIV_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTSRC_MASK BIT(30)
+#define DFSDM_CHCFGR1_CKOUTSRC(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTSRC_MASK, v)
+#define DFSDM_CHCFGR1_DFSDMEN_MASK BIT(31)
+#define DFSDM_CHCFGR1_DFSDMEN(v) FIELD_PREP(DFSDM_CHCFGR1_DFSDMEN_MASK, v)
+
+/* CHCFGR2: Channel configuration register 2 */
+#define DFSDM_CHCFGR2_DTRBS_MASK GENMASK(7, 3)
+#define DFSDM_CHCFGR2_DTRBS(v) FIELD_PREP(DFSDM_CHCFGR2_DTRBS_MASK, v)
+#define DFSDM_CHCFGR2_OFFSET_MASK GENMASK(31, 8)
+#define DFSDM_CHCFGR2_OFFSET(v) FIELD_PREP(DFSDM_CHCFGR2_OFFSET_MASK, v)
+
+/* AWSCDR: Channel analog watchdog and short circuit detector */
+#define DFSDM_AWSCDR_SCDT_MASK GENMASK(7, 0)
+#define DFSDM_AWSCDR_SCDT(v) FIELD_PREP(DFSDM_AWSCDR_SCDT_MASK, v)
+#define DFSDM_AWSCDR_BKSCD_MASK GENMASK(15, 12)
+#define DFSDM_AWSCDR_BKSCD(v) FIELD_PREP(DFSDM_AWSCDR_BKSCD_MASK, v)
+#define DFSDM_AWSCDR_AWFOSR_MASK GENMASK(20, 16)
+#define DFSDM_AWSCDR_AWFOSR(v) FIELD_PREP(DFSDM_AWSCDR_AWFOSR_MASK, v)
+#define DFSDM_AWSCDR_AWFORD_MASK GENMASK(23, 22)
+#define DFSDM_AWSCDR_AWFORD(v) FIELD_PREP(DFSDM_AWSCDR_AWFORD_MASK, v)
+
+/*
+ * Filters register definitions
+ */
+#define DFSDM_FILTER_BASE_ADR 0x100
+#define DFSDM_FILTER_REG_MASK 0x7F
+#define DFSDM_FILTER_X_BASE_ADR(x) ((x) * 0x80 + DFSDM_FILTER_BASE_ADR)
+
+#define DFSDM_CR1(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x00)
+#define DFSDM_CR2(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x04)
+#define DFSDM_ISR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x08)
+#define DFSDM_ICR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x0C)
+#define DFSDM_JCHGR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x10)
+#define DFSDM_FCR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x14)
+#define DFSDM_JDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x18)
+#define DFSDM_RDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x1C)
+#define DFSDM_AWHTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x20)
+#define DFSDM_AWLTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x24)
+#define DFSDM_AWSR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x28)
+#define DFSDM_AWCFR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x2C)
+#define DFSDM_EXMAX(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x30)
+#define DFSDM_EXMIN(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x34)
+#define DFSDM_CNVTIMR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x38)
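+
+/*
+ * Offset example: filter instances are spaced 0x80 apart from base 0x100,
+ * so for instance DFSDM_RDATAR(1) = 0x100 + 1 * 0x80 + 0x1C = 0x19C.
+ */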
+
+/* CR1 Control register 1 */
+#define DFSDM_CR1_DFEN_MASK BIT(0)
+#define DFSDM_CR1_DFEN(v) FIELD_PREP(DFSDM_CR1_DFEN_MASK, v)
+#define DFSDM_CR1_JSWSTART_MASK BIT(1)
+#define DFSDM_CR1_JSWSTART(v) FIELD_PREP(DFSDM_CR1_JSWSTART_MASK, v)
+#define DFSDM_CR1_JSYNC_MASK BIT(3)
+#define DFSDM_CR1_JSYNC(v) FIELD_PREP(DFSDM_CR1_JSYNC_MASK, v)
+#define DFSDM_CR1_JSCAN_MASK BIT(4)
+#define DFSDM_CR1_JSCAN(v) FIELD_PREP(DFSDM_CR1_JSCAN_MASK, v)
+#define DFSDM_CR1_JDMAEN_MASK BIT(5)
+#define DFSDM_CR1_JDMAEN(v) FIELD_PREP(DFSDM_CR1_JDMAEN_MASK, v)
+#define DFSDM_CR1_JEXTSEL_MASK GENMASK(12, 8)
+#define DFSDM_CR1_JEXTSEL(v) FIELD_PREP(DFSDM_CR1_JEXTSEL_MASK, v)
+#define DFSDM_CR1_JEXTEN_MASK GENMASK(14, 13)
+#define DFSDM_CR1_JEXTEN(v) FIELD_PREP(DFSDM_CR1_JEXTEN_MASK, v)
+#define DFSDM_CR1_RSWSTART_MASK BIT(17)
+#define DFSDM_CR1_RSWSTART(v) FIELD_PREP(DFSDM_CR1_RSWSTART_MASK, v)
+#define DFSDM_CR1_RCONT_MASK BIT(18)
+#define DFSDM_CR1_RCONT(v) FIELD_PREP(DFSDM_CR1_RCONT_MASK, v)
+#define DFSDM_CR1_RSYNC_MASK BIT(19)
+#define DFSDM_CR1_RSYNC(v) FIELD_PREP(DFSDM_CR1_RSYNC_MASK, v)
+#define DFSDM_CR1_RDMAEN_MASK BIT(21)
+#define DFSDM_CR1_RDMAEN(v) FIELD_PREP(DFSDM_CR1_RDMAEN_MASK, v)
+#define DFSDM_CR1_RCH_MASK GENMASK(26, 24)
+#define DFSDM_CR1_RCH(v) FIELD_PREP(DFSDM_CR1_RCH_MASK, v)
+#define DFSDM_CR1_FAST_MASK BIT(29)
+#define DFSDM_CR1_FAST(v) FIELD_PREP(DFSDM_CR1_FAST_MASK, v)
+#define DFSDM_CR1_AWFSEL_MASK BIT(30)
+#define DFSDM_CR1_AWFSEL(v) FIELD_PREP(DFSDM_CR1_AWFSEL_MASK, v)
+
+/* CR2: Control register 2 */
+#define DFSDM_CR2_IE_MASK GENMASK(6, 0)
+#define DFSDM_CR2_IE(v) FIELD_PREP(DFSDM_CR2_IE_MASK, v)
+#define DFSDM_CR2_JEOCIE_MASK BIT(0)
+#define DFSDM_CR2_JEOCIE(v) FIELD_PREP(DFSDM_CR2_JEOCIE_MASK, v)
+#define DFSDM_CR2_REOCIE_MASK BIT(1)
+#define DFSDM_CR2_REOCIE(v) FIELD_PREP(DFSDM_CR2_REOCIE_MASK, v)
+#define DFSDM_CR2_JOVRIE_MASK BIT(2)
+#define DFSDM_CR2_JOVRIE(v) FIELD_PREP(DFSDM_CR2_JOVRIE_MASK, v)
+#define DFSDM_CR2_ROVRIE_MASK BIT(3)
+#define DFSDM_CR2_ROVRIE(v) FIELD_PREP(DFSDM_CR2_ROVRIE_MASK, v)
+#define DFSDM_CR2_AWDIE_MASK BIT(4)
+#define DFSDM_CR2_AWDIE(v) FIELD_PREP(DFSDM_CR2_AWDIE_MASK, v)
+#define DFSDM_CR2_SCDIE_MASK BIT(5)
+#define DFSDM_CR2_SCDIE(v) FIELD_PREP(DFSDM_CR2_SCDIE_MASK, v)
+#define DFSDM_CR2_CKABIE_MASK BIT(6)
+#define DFSDM_CR2_CKABIE(v) FIELD_PREP(DFSDM_CR2_CKABIE_MASK, v)
+#define DFSDM_CR2_EXCH_MASK GENMASK(15, 8)
+#define DFSDM_CR2_EXCH(v) FIELD_PREP(DFSDM_CR2_EXCH_MASK, v)
+#define DFSDM_CR2_AWDCH_MASK GENMASK(23, 16)
+#define DFSDM_CR2_AWDCH(v) FIELD_PREP(DFSDM_CR2_AWDCH_MASK, v)
+
+/* ISR: Interrupt status register */
+#define DFSDM_ISR_JEOCF_MASK BIT(0)
+#define DFSDM_ISR_JEOCF(v) FIELD_PREP(DFSDM_ISR_JEOCF_MASK, v)
+#define DFSDM_ISR_REOCF_MASK BIT(1)
+#define DFSDM_ISR_REOCF(v) FIELD_PREP(DFSDM_ISR_REOCF_MASK, v)
+#define DFSDM_ISR_JOVRF_MASK BIT(2)
+#define DFSDM_ISR_JOVRF(v) FIELD_PREP(DFSDM_ISR_JOVRF_MASK, v)
+#define DFSDM_ISR_ROVRF_MASK BIT(3)
+#define DFSDM_ISR_ROVRF(v) FIELD_PREP(DFSDM_ISR_ROVRF_MASK, v)
+#define DFSDM_ISR_AWDF_MASK BIT(4)
+#define DFSDM_ISR_AWDF(v) FIELD_PREP(DFSDM_ISR_AWDF_MASK, v)
+#define DFSDM_ISR_JCIP_MASK BIT(13)
+#define DFSDM_ISR_JCIP(v) FIELD_PREP(DFSDM_ISR_JCIP_MASK, v)
+#define DFSDM_ISR_RCIP_MASK BIT(14)
+#define DFSDM_ISR_RCIP(v) FIELD_PREP(DFSDM_ISR_RCIP_MASK, v)
+#define DFSDM_ISR_CKABF_MASK GENMASK(23, 16)
+#define DFSDM_ISR_CKABF(v) FIELD_PREP(DFSDM_ISR_CKABF_MASK, v)
+#define DFSDM_ISR_SCDF_MASK GENMASK(31, 24)
+#define DFSDM_ISR_SCDF(v) FIELD_PREP(DFSDM_ISR_SCDF_MASK, v)
+
+/* ICR: Interrupt flag clear register */
+#define DFSDM_ICR_CLRJOVRF_MASK BIT(2)
+#define DFSDM_ICR_CLRJOVRF(v) FIELD_PREP(DFSDM_ICR_CLRJOVRF_MASK, v)
+#define DFSDM_ICR_CLRROVRF_MASK BIT(3)
+#define DFSDM_ICR_CLRROVRF(v) FIELD_PREP(DFSDM_ICR_CLRROVRF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_MASK GENMASK(23, 16)
+#define DFSDM_ICR_CLRCKABF(v) FIELD_PREP(DFSDM_ICR_CLRCKABF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_CH_MASK(y) BIT(16 + (y))
+#define DFSDM_ICR_CLRCKABF_CH(v, y) \
+ (((v) << (16 + (y))) & DFSDM_ICR_CLRCKABF_CH_MASK(y))
+#define DFSDM_ICR_CLRSCDF_MASK GENMASK(31, 24)
+#define DFSDM_ICR_CLRSCDF(v) FIELD_PREP(DFSDM_ICR_CLRSCDF_MASK, v)
+#define DFSDM_ICR_CLRSCDF_CH_MASK(y) BIT(24 + (y))
+#define DFSDM_ICR_CLRSCDF_CH(v, y) \
+ (((v) << (24 + (y))) & DFSDM_ICR_CLRSCDF_CH_MASK(y))
+
+/* FCR: Filter control register */
+#define DFSDM_FCR_IOSR_MASK GENMASK(7, 0)
+#define DFSDM_FCR_IOSR(v) FIELD_PREP(DFSDM_FCR_IOSR_MASK, v)
+#define DFSDM_FCR_FOSR_MASK GENMASK(25, 16)
+#define DFSDM_FCR_FOSR(v) FIELD_PREP(DFSDM_FCR_FOSR_MASK, v)
+#define DFSDM_FCR_FORD_MASK GENMASK(31, 29)
+#define DFSDM_FCR_FORD(v) FIELD_PREP(DFSDM_FCR_FORD_MASK, v)
+
+/* RDATAR: Filter data register for regular channel */
+#define DFSDM_DATAR_CH_MASK GENMASK(2, 0)
+#define DFSDM_DATAR_DATA_OFFSET 8
+#define DFSDM_DATAR_DATA_MASK GENMASK(31, DFSDM_DATAR_DATA_OFFSET)
+
+/* AWLTR: Filter analog watchdog low threshold register */
+#define DFSDM_AWLTR_BKAWL_MASK GENMASK(3, 0)
+#define DFSDM_AWLTR_BKAWL(v) FIELD_PREP(DFSDM_AWLTR_BKAWL_MASK, v)
+#define DFSDM_AWLTR_AWLT_MASK GENMASK(31, 8)
+#define DFSDM_AWLTR_AWLT(v) FIELD_PREP(DFSDM_AWLTR_AWLT_MASK, v)
+
+/* AWHTR: Filter analog watchdog high threshold register */
+#define DFSDM_AWHTR_BKAWH_MASK GENMASK(3, 0)
+#define DFSDM_AWHTR_BKAWH(v) FIELD_PREP(DFSDM_AWHTR_BKAWH_MASK, v)
+#define DFSDM_AWHTR_AWHT_MASK GENMASK(31, 8)
+#define DFSDM_AWHTR_AWHT(v) FIELD_PREP(DFSDM_AWHTR_AWHT_MASK, v)
+
+/* AWSR: Filter watchdog status register */
+#define DFSDM_AWSR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWSR_AWLTF(v) FIELD_PREP(DFSDM_AWSR_AWLTF_MASK, v)
+#define DFSDM_AWSR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWSR_AWHTF(v) FIELD_PREP(DFSDM_AWSR_AWHTF_MASK, v)
+
+/* AWCFR: Filter analog watchdog clear flag register */
+#define DFSDM_AWCFR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWCFR_AWLTF(v) FIELD_PREP(DFSDM_AWCFR_AWLTF_MASK, v)
+#define DFSDM_AWCFR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWCFR_AWHTF(v) FIELD_PREP(DFSDM_AWCFR_AWHTF_MASK, v)
+
+/* DFSDM filter order */
+enum stm32_dfsdm_sinc_order {
+ DFSDM_FASTSINC_ORDER, /* FastSinc filter type */
+ DFSDM_SINC1_ORDER, /* Sinc 1 filter type */
+ DFSDM_SINC2_ORDER, /* Sinc 2 filter type */
+ DFSDM_SINC3_ORDER, /* Sinc 3 filter type */
+ DFSDM_SINC4_ORDER, /* Sinc 4 filter type (N.A. for watchdog) */
+ DFSDM_SINC5_ORDER, /* Sinc 5 filter type (N.A. for watchdog) */
+ DFSDM_NB_SINC_ORDER,
+};
+
+/**
+ * struct stm32_dfsdm_filter - structure relative to the STM32 DFSDM filter
+ * @iosr: integrator oversampling
+ * @fosr: filter oversampling
+ * @ford: filter order
+ * @res: output sample resolution
+ * @sync_mode: filter synchronized with filter 0
+ * @fast: filter fast mode
+ */
+struct stm32_dfsdm_filter {
+ unsigned int iosr;
+ unsigned int fosr;
+ enum stm32_dfsdm_sinc_order ford;
+ u64 res;
+ unsigned int sync_mode;
+ unsigned int fast;
+};
+
+/**
+ * struct stm32_dfsdm_channel - structure relative to the STM32 DFSDM channel
+ * @id: id of the channel
+ * @type: interface type linked to stm32_dfsdm_chan_type
+ * @src: interface type linked to stm32_dfsdm_chan_src
+ * @alt_si: alternative serial input interface
+ */
+struct stm32_dfsdm_channel {
+ unsigned int id;
+ unsigned int type;
+ unsigned int src;
+ unsigned int alt_si;
+};
+
+/**
+ * struct stm32_dfsdm - STM32 DFSDM driver common data (for all instances)
+ * @base: control registers base cpu addr
+ * @phys_base: DFSDM IP register physical address
+ * @regmap: regmap for register read/write
+ * @fl_list: filter resources list
+ * @num_fls: number of filter resources available
+ * @ch_list: channel resources list
+ * @num_chs: number of channel resources available
+ * @spi_master_freq: SPI clock out frequency
+ */
+struct stm32_dfsdm {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ struct regmap *regmap;
+ struct stm32_dfsdm_filter *fl_list;
+ unsigned int num_fls;
+ struct stm32_dfsdm_channel *ch_list;
+ unsigned int num_chs;
+ unsigned int spi_master_freq;
+};
+
+/* DFSDM channel serial spi clock source */
+enum stm32_dfsdm_spi_clk_src {
+ DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING
+};
+
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm);
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm);
+
+#endif
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index 4ffd3db7817f..338774cba19b 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -29,6 +29,16 @@ config IIO_BUFFER_DMAENGINE
Should be selected by drivers that want to use this functionality.
+config IIO_BUFFER_HW_CONSUMER
+ tristate "Industrial I/O HW buffering"
+ help
+ Provides a way of bonding IIO devices when one device has a direct
+ hardware connection to another. In this case the buffers for data
+ transfers are handled by the hardware.
+
+ Should be selected by drivers that want to use the generic HW consumer
+ interface.
+
config IIO_KFIFO_BUF
tristate "Industrial I/O buffering based on kfifo"
help
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 95f9f41c58b7..1403eb2f9409 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -7,5 +7,6 @@
obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4847534700e7..ea63c838eeae 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -104,6 +104,17 @@ error_free_cb_buff:
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
+ size_t watermark)
+{
+ if (!watermark)
+ return -EINVAL;
+ cb_buff->buffer.watermark = watermark;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);
+
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
new file mode 100644
index 000000000000..95165697d8ae
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/buffer_impl.h>
+
+/**
+ * struct iio_hw_consumer - IIO hw consumer block
+ * @buffers: hardware buffers list head.
+ * @channels: IIO provider channels.
+ */
+struct iio_hw_consumer {
+ struct list_head buffers;
+ struct iio_channel *channels;
+};
+
+struct hw_consumer_buffer {
+ struct list_head head;
+ struct iio_dev *indio_dev;
+ struct iio_buffer buffer;
+ long scan_mask[];
+};
+
+static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
+ struct iio_buffer *buffer)
+{
+ return container_of(buffer, struct hw_consumer_buffer, buffer);
+}
+
+static void iio_hw_buf_release(struct iio_buffer *buffer)
+{
+ struct hw_consumer_buffer *hw_buf =
+ iio_buffer_to_hw_consumer_buffer(buffer);
+ kfree(hw_buf);
+}
+
+static const struct iio_buffer_access_funcs iio_hw_buf_access = {
+ .release = &iio_hw_buf_release,
+ .modes = INDIO_BUFFER_HARDWARE,
+};
+
+static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
+ struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
+{
+ size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ if (buf->indio_dev == indio_dev)
+ return buf;
+ }
+
+ buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->buffer.access = &iio_hw_buf_access;
+ buf->indio_dev = indio_dev;
+ buf->buffer.scan_mask = buf->scan_mask;
+
+ iio_buffer_init(&buf->buffer);
+ list_add_tail(&buf->head, &hwc->buffers);
+
+ return buf;
+}
+
+/**
+ * iio_hw_consumer_alloc() - Allocate IIO hardware consumer
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a valid iio_hw_consumer on success or a ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
+{
+ struct hw_consumer_buffer *buf;
+ struct iio_hw_consumer *hwc;
+ struct iio_channel *chan;
+ int ret;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&hwc->buffers);
+
+ hwc->channels = iio_channel_get_all(dev);
+ if (IS_ERR(hwc->channels)) {
+ ret = PTR_ERR(hwc->channels);
+ goto err_free_hwc;
+ }
+
+ chan = &hwc->channels[0];
+ while (chan->indio_dev) {
+ buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_put_buffers;
+ }
+ set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
+ chan++;
+ }
+
+ return hwc;
+
+err_put_buffers:
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ iio_channel_release_all(hwc->channels);
+err_free_hwc:
+ kfree(hwc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
+
+/**
+ * iio_hw_consumer_free() - Free IIO hardware consumer
+ * @hwc: hw consumer to free.
+ */
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf, *n;
+
+ iio_channel_release_all(hwc->channels);
+ list_for_each_entry_safe(buf, n, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ kfree(hwc);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
+
+static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+{
+ iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+}
+
+static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
+{
+ struct iio_hw_consumer **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+ return *r == data;
+}
+
+/**
+ * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
+ * @dev: Pointer to consumer device.
+ *
+ * Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
+ * is automatically freed on driver detach.
+ *
+ * If an iio_hw_consumer allocated with this function needs to be freed
+ * separately, devm_iio_hw_consumer_free() must be used.
+ *
+ * Returns a pointer to the allocated iio_hw_consumer on success, NULL on
+ * failure.
+ */
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
+{
+ struct iio_hw_consumer **ptr, *iio_hwc;
+
+ ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ iio_hwc = iio_hw_consumer_alloc(dev);
+ if (IS_ERR(iio_hwc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = iio_hwc;
+ devres_add(dev, ptr);
+ }
+
+ return iio_hwc;
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
+
+/**
+ * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
+ * @dev: Pointer to consumer device.
+ * @hwc: iio_hw_consumer to free.
+ *
+ * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
+ */
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_hw_consumer_release,
+ devm_iio_hw_consumer_match, hwc);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
+
+/**
+ * iio_hw_consumer_enable() - Enable IIO hardware consumer
+ * @hwc: iio_hw_consumer to enable.
+ *
+ * Returns 0 on success.
+ */
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+ int ret;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
+ if (ret)
+ goto err_disable_buffers;
+ }
+
+ return 0;
+
+err_disable_buffers:
+ list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
+
+/**
+ * iio_hw_consumer_disable() - Disable IIO hardware consumer
+ * @hwc: iio_hw_consumer to disable.
+ */
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Hardware consumer buffer for the IIO framework");
+MODULE_LICENSE("GPL v2");
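A minimal usage sketch of the hw consumer API added above (illustrative, not part of the patch; foo_probe and its device are hypothetical):

	static int foo_probe(struct device *dev)
	{
		struct iio_hw_consumer *hwc;
		int ret;

		/* devm variant: freed automatically on driver detach */
		hwc = devm_iio_hw_consumer_alloc(dev);
		if (!hwc)
			return -ENOMEM;

		/* attach the consumer buffers to their producing IIO devices */
		ret = iio_hw_consumer_enable(hwc);
		if (ret)
			return ret;

		/* ... channels now stream into the hardware consumer ... */

		iio_hw_consumer_disable(hwc);
		return 0;
	}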
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 4c45488e3a7f..c775fedbcaf6 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -43,7 +43,7 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d2b465140a6b..0bc2fe31f211 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -169,7 +169,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or 0 for other cases
*/
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filp->private_data;
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 90fac8ec63c9..0bcf073e46db 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -95,12 +95,12 @@ EXPORT_SYMBOL(iio_push_event);
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or a negative error code on failure
*/
-static unsigned int iio_event_poll(struct file *filep,
+static __poll_t iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_event_interface *ev_int = indio_dev->event_interface;
- unsigned int events = 0;
+ __poll_t events = 0;
if (!indio_dev->info)
return events;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 069defcc6d9b..ec98790e2a28 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -664,9 +664,8 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
-static int iio_read_channel_attribute(struct iio_channel *chan,
- int *val, int *val2,
- enum iio_chan_info_enum attribute)
+int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -682,6 +681,7 @@ err_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
@@ -850,7 +850,8 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
chan->channel, val, val2, info);
}
-int iio_write_channel_raw(struct iio_channel *chan, int val)
+int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -860,12 +861,18 @@ int iio_write_channel_raw(struct iio_channel *chan, int val)
goto err_unlock;
}
- ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
+
+int iio_write_channel_raw(struct iio_channel *chan, int val)
+{
+ return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
+}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
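With iio_write_channel_attribute() exported above, in-kernel consumers can write attributes other than the raw value. An illustrative call, assuming chan was obtained via iio_channel_get() and that the target driver interprets val/val2 per its write format:

	/* illustrative values for an integer-plus-micro attribute */
	ret = iio_write_channel_attribute(chan, 1, 15, IIO_CHAN_INFO_CALIBSCALE);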
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index cbf186522016..5cd700421695 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
depends on NET
depends on INET
depends on m || IPV6 != m
+ depends on !ALPHA
select IRQ_POLL
---help---
Core support for InfiniBand (IB). Make sure to also select
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 504b926552c6..f69833db0a32 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -12,7 +12,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- security.o nldev.o
+ security.o nldev.o restrack.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f4e8185bccd3..a5b4cf030c11 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -243,8 +243,7 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr,
- u16 *vlan_id)
+ struct rdma_dev_addr *dev_addr)
{
struct net_device *dev;
@@ -266,9 +265,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
return -EADDRNOTAVAIL;
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
break;
#if IS_ENABLED(CONFIG_IPV6)
@@ -279,9 +275,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
}
}
@@ -481,7 +474,7 @@ static int addr_resolve_neigh(struct dst_entry *dst,
if (dst->dev->flags & IFF_LOOPBACK) {
int ret;
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr,
MAX_ADDR_LEN);
@@ -558,7 +551,7 @@ static int addr_resolve(struct sockaddr *src_in,
}
if (ndev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
/*
* Put the loopback device and get the translated
* device instead.
@@ -744,7 +737,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
- struct rdma_dev_addr *addr;
struct completion comp;
int status;
};
@@ -752,39 +744,31 @@ struct resolve_cb_context {
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- if (!status)
- memcpy(((struct resolve_cb_context *)context)->addr,
- addr, sizeof(struct rdma_dev_addr));
((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit)
{
- int ret = 0;
struct rdma_dev_addr dev_addr;
struct resolve_cb_context ctx;
- struct net_device *dev;
-
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
-
+ int ret;
rdma_gid2ip(&sgid_addr._sockaddr, sgid);
rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
- if (if_index)
- dev_addr.bound_dev_if = *if_index;
+ dev_addr.bound_dev_if = ndev->ifindex;
dev_addr.net = &init_net;
- ctx.addr = &dev_addr;
init_completion(&ctx.comp);
ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
&dev_addr, 1000, resolve_cb, &ctx);
@@ -798,42 +782,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
return ret;
memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
- dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
- if (!dev)
- return -ENODEV;
- if (if_index)
- *if_index = dev_addr.bound_dev_if;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
- if (hoplimit)
- *hoplimit = dev_addr.hoplimit;
- dev_put(dev);
- return ret;
-}
-EXPORT_SYMBOL(rdma_addr_find_l2_eth_by_grh);
-
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
-{
- int ret = 0;
- struct rdma_dev_addr dev_addr;
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } gid_addr;
-
- rdma_gid2ip(&gid_addr._sockaddr, sgid);
-
- memset(&dev_addr, 0, sizeof(dev_addr));
- dev_addr.net = &init_net;
- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
- if (ret)
- return ret;
-
- memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
- return ret;
+ *hoplimit = dev_addr.hoplimit;
+ return 0;
}
-EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
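The rdma_translate_ip() signature above drops the vlan_id out-parameter. An illustrative migration for callers (addr and dev_addr assumed from the caller's context); callers that still need the VLAN query the bound netdev themselves:

	/* before: ret = rdma_translate_ip(addr, dev_addr, NULL); */
	ret = rdma_translate_ip(addr, dev_addr);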
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 77515638c55c..e9a409d7f4e2 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -573,27 +573,24 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
struct ib_gid_attr attr;
if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
- goto next;
+ continue;
if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
- goto next;
+ continue;
memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
- if (filter(gid, &attr, context))
+ if (filter(gid, &attr, context)) {
found = true;
-
-next:
- if (found)
+ if (index)
+ *index = i;
break;
+ }
}
read_unlock_irqrestore(&table->rwlock, flags);
if (!found)
return -ENOENT;
-
- if (index)
- *index = i;
return 0;
}
@@ -824,12 +821,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
if (err)
return err;
- err = roce_rescan_device(ib_dev);
-
- if (err) {
- gid_table_cleanup_one(ib_dev);
- gid_table_release_one(ib_dev);
- }
+ rdma_roce_rescan_device(ib_dev);
return err;
}
@@ -883,7 +875,6 @@ int ib_find_gid_by_filter(struct ib_device *device,
port_num, filter,
context, index);
}
-EXPORT_SYMBOL(ib_find_gid_by_filter);
int ib_get_cached_pkey(struct ib_device *device,
u8 port_num,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f6b159d79977..e6749157fd86 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -452,13 +452,14 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
-static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
- struct ib_grh *grh, struct cm_av *av)
+static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ struct ib_grh *grh, struct cm_av *av)
{
av->port = port;
av->pkey_index = wc->pkey_index;
- ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
- grh, &av->ah_attr);
+ return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
+ port->port_num, wc,
+ grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
@@ -494,8 +495,11 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
return ret;
av->port = port;
- ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
- &av->ah_attr);
+ ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
+ &av->ah_attr);
+ if (ret)
+ return ret;
+
av->timeout = path->packet_life_time + 1;
spin_lock_irqsave(&cm.lock, flags);
@@ -1560,6 +1564,35 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
return pkey;
}
+/**
+ * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
+ * @work: Work completion
+ * @path: Path record
+ *
+ * ULPs (such as IPoIB) do not understand OPA GIDs and will
+ * reject them as the local_gid will not match the sgid. Therefore,
+ * change the pathrec's SGID to an IB SGID.
+ */
+static void cm_opa_to_ib_sgid(struct cm_work *work,
+ struct sa_path_rec *path)
+{
+ struct ib_device *dev = work->port->cm_dev->ib_device;
+ u8 port_num = work->port->port_num;
+
+ if (rdma_cap_opa_ah(dev, port_num) &&
+ (ib_is_opa_gid(&path->sgid))) {
+ union ib_gid sgid;
+
+ if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
+ dev_warn(&dev->dev,
+ "Error updating sgid in CM request\n");
+ return;
+ }
+
+ path->sgid = sgid;
+ }
+}
+
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
@@ -1573,10 +1606,13 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
- if (cm_req_has_alt_path(req_msg))
+ cm_opa_to_ib_sgid(work, param->primary_path);
+ if (cm_req_has_alt_path(req_msg)) {
param->alternate_path = &work->path[1];
- else
+ cm_opa_to_ib_sgid(work, param->alternate_path);
+ } else {
param->alternate_path = NULL;
+ }
param->remote_ca_guid = req_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
@@ -1826,9 +1862,11 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id = req_msg->local_comm_id;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto destroy;
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
@@ -1841,9 +1879,10 @@ static int cm_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
+ pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
+ be32_to_cpu(cm_id->local_id));
ret = -EINVAL;
- kfree(cm_id_priv->timewait_info);
- goto destroy;
+ goto free_timeinfo;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1861,56 +1900,50 @@ static int cm_req_handler(struct cm_work *work)
work->port->port_num,
grh->sgid_index,
&gid, &gid_attr);
- if (!ret) {
- if (gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- cm_format_paths_from_req(req_msg, &work->path[0],
- &work->path[1]);
- if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
- sa_path_set_dmac(&work->path[0],
- cm_id_priv->av.ah_attr.roce.dmac);
- work->path[0].hop_limit = grh->hop_limit;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
- cm_id_priv);
+ if (ret) {
+ ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
+ goto rejected;
+ }
+
+ if (gid_attr.ndev) {
+ work->path[0].rec_type =
+ sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
+ sa_path_set_ifindex(&work->path[0],
+ gid_attr.ndev->ifindex);
+ sa_path_set_ndev(&work->path[0],
+ dev_net(gid_attr.ndev));
+ dev_put(gid_attr.ndev);
+ } else {
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ &work->path[0],
+ &req_msg->primary_local_gid);
}
+ if (cm_req_has_alt_path(req_msg))
+ work->path[1].rec_type = work->path[0].rec_type;
+ cm_format_paths_from_req(req_msg, &work->path[0],
+ &work->path[1]);
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ sa_path_set_dmac(&work->path[0],
+ cm_id_priv->av.ah_attr.roce.dmac);
+ work->path[0].hop_limit = grh->hop_limit;
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
if (ret) {
- int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num, 0,
- &work->path[0].sgid,
- &gid_attr);
- if (!err && gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
- &work->path[0].sgid, sizeof work->path[0].sgid,
- NULL, 0);
+ int err;
+
+ err = ib_get_cached_gid(work->port->cm_dev->ib_device,
+ work->port->port_num, 0,
+ &work->path[0].sgid,
+ NULL);
+ if (err)
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ NULL, 0, NULL, 0);
+ else
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ &work->path[0].sgid,
+ sizeof(work->path[0].sgid),
+ NULL, 0);
goto rejected;
}
if (cm_req_has_alt_path(req_msg)) {
@@ -1919,7 +1952,7 @@ static int cm_req_handler(struct cm_work *work)
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
- sizeof work->path[0].sgid, NULL, 0);
+ sizeof(work->path[0].sgid), NULL, 0);
goto rejected;
}
}
@@ -1945,6 +1978,8 @@ static int cm_req_handler(struct cm_work *work)
rejected:
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
+free_timeinfo:
+ kfree(cm_id_priv->timewait_info);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
@@ -1997,6 +2032,8 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
+ pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2063,6 +2100,8 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
+ pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto error;
}
@@ -2170,6 +2209,8 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
+ pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
return -EINVAL;
}
@@ -2183,6 +2224,10 @@ static int cm_rep_handler(struct cm_work *work)
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
+ __func__, cm_id_priv->id.state,
+ be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
@@ -2196,6 +2241,8 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: Failed to insert remote id %d\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
/* Check for a stale connection. */
@@ -2213,6 +2260,10 @@ static int cm_rep_handler(struct cm_work *work)
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
+ pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
+
if (cur_cm_id_priv) {
cm_id = &cur_cm_id_priv->id;
ib_send_cm_dreq(cm_id, NULL, 0);
@@ -2359,6 +2410,8 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED) {
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2428,6 +2481,8 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id,
if (cm_id->state != IB_CM_DREQ_RCVD) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
+ pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
+ __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
return -EINVAL;
}
@@ -2493,6 +2548,9 @@ static int cm_dreq_handler(struct cm_work *work)
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
+ pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(dreq_msg->local_comm_id),
+ be32_to_cpu(dreq_msg->remote_comm_id));
return -EINVAL;
}
@@ -2535,6 +2593,9 @@ static int cm_dreq_handler(struct cm_work *work)
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
@@ -2638,6 +2699,8 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
cm_enter_timewait(cm_id_priv);
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2748,6 +2811,9 @@ static int cm_rej_handler(struct cm_work *work)
/* fall through */
default:
spin_unlock_irq(&cm_id_priv->lock);
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto out;
}
@@ -2811,6 +2877,9 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
}
/* fall through */
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto error1;
}
@@ -2912,6 +2981,9 @@ static int cm_mra_handler(struct cm_work *work)
counter[CM_MRA_COUNTER]);
/* fall through */
default:
+ pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto out;
}
@@ -3085,6 +3157,12 @@ static int cm_lap_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto deref;
+
param = &work->cm_event.param.lap_rcvd;
memset(&work->path[0], 0, sizeof(work->path[1]));
cm_path_set_rec_type(work->port->cm_dev->ib_device,
@@ -3131,9 +3209,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -3386,6 +3461,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
+ int ret;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
@@ -3398,9 +3474,12 @@ static int cm_sidr_req_handler(struct cm_work *work)
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto out;
+
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
@@ -3692,6 +3771,7 @@ static void cm_work_handler(struct work_struct *_work)
ret = cm_timewait_handler(work);
break;
default:
+ pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
ret = -EINVAL;
break;
}
@@ -3727,6 +3807,8 @@ static int cm_establish(struct ib_cm_id *cm_id)
ret = -EISCONN;
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
break;
}
@@ -3924,6 +4006,9 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -3971,6 +4056,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -4030,6 +4118,9 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6294a7001d33..e66963ca58bd 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -601,7 +601,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
int ret;
if (addr->sa_family != AF_IB) {
- ret = rdma_translate_ip(addr, dev_addr, NULL);
+ ret = rdma_translate_ip(addr, dev_addr);
} else {
cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
ret = 0;
@@ -612,11 +612,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
static inline int cma_validate_port(struct ib_device *device, u8 port,
enum ib_gid_type gid_type,
- union ib_gid *gid, int dev_type,
- int bound_if_index)
+ union ib_gid *gid,
+ struct rdma_id_private *id_priv)
{
- int ret = -ENODEV;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ int bound_if_index = dev_addr->bound_dev_if;
+ int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
+ int ret = -ENODEV;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
return ret;
@@ -624,11 +627,13 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
- ndev = dev_get_by_index(&init_net, bound_if_index);
- else
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ return ret;
+ } else {
gid_type = IB_GID_TYPE_IB;
-
+ }
ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
ndev, NULL);
@@ -669,8 +674,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
listen_id_priv->gid_type, gidp,
- dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -691,8 +695,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
cma_dev->default_gid_type[port - 1],
- gidp, dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ gidp, id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -2036,6 +2039,33 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_get_service_id);
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ struct rdma_addr *addr = &cm_id->route.addr;
+
+ if (!cm_id->device) {
+ if (sgid)
+ memset(sgid, 0, sizeof(*sgid));
+ if (dgid)
+ memset(dgid, 0, sizeof(*dgid));
+ return;
+ }
+
+ if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
+ if (sgid)
+ rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
+ if (dgid)
+ rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
+ } else {
+ if (sgid)
+ rdma_addr_get_sgid(&addr->dev_addr, sgid);
+ if (dgid)
+ rdma_addr_get_dgid(&addr->dev_addr, dgid);
+ }
+}
+EXPORT_SYMBOL(rdma_read_gids);
+
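An illustrative use of the helper above, assuming cm_id is a resolved rdma_cm_id; either output pointer may be NULL:

	union ib_gid sgid, dgid;

	/* valid for both RoCE and IB, without peeking into cm_id->route */
	rdma_read_gids(cm_id, &sgid, &dgid);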
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
@@ -2132,7 +2162,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2414,6 +2444,26 @@ out:
kfree(work);
}
+static void cma_init_resolve_route_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+}
+
+static void cma_init_resolve_addr_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDR_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+}
+
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2424,11 +2474,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
@@ -2449,10 +2495,63 @@ err1:
return ret;
}
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths)
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
+/*
+ * cma_iboe_set_path_rec_l2_fields() is a helper that sets the
+ * path record type based on the GID type.
+ * It also sets up the other L2 fields of the path record, including
+ * the destination MAC address and netdev ifindex.
+ * It returns the netdev of the bound interface for this path record entry.
+ */
+static struct net_device *
+cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
+{
+ struct rdma_route *route = &id_priv->id.route;
+ enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ struct rdma_addr *addr = &route->addr;
+ unsigned long supported_gids;
+ struct net_device *ndev;
+
+ if (!addr->dev_addr.bound_dev_if)
+ return NULL;
+
+ ndev = dev_get_by_index(addr->dev_addr.net,
+ addr->dev_addr.bound_dev_if);
+ if (!ndev)
+ return NULL;
+
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ gid_type = cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
+ /* Use the hint from IP Stack to select GID Type */
+ if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
+ gid_type = ib_network_to_gid_type(addr->dev_addr.network);
+ route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
+
+ sa_path_set_ndev(route->path_rec, addr->dev_addr.net);
+ sa_path_set_ifindex(route->path_rec, ndev->ifindex);
+ sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
+ return ndev;
+}
+
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec)
{
struct rdma_id_private *id_priv;
+ struct net_device *ndev;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
@@ -2460,20 +2559,33 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
RDMA_CM_ROUTE_RESOLVED))
return -EINVAL;
- id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+ id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
GFP_KERNEL);
if (!id->route.path_rec) {
ret = -ENOMEM;
goto err;
}
- id->route.num_paths = num_paths;
+ if (rdma_protocol_roce(id->device, id->port_num)) {
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+ dev_put(ndev);
+ }
+
+ id->route.num_paths = 1;
return 0;
+
+err_free:
+ kfree(id->route.path_rec);
+ id->route.path_rec = NULL;
err:
cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
return ret;
}
-EXPORT_SYMBOL(rdma_set_ib_paths);
+EXPORT_SYMBOL(rdma_set_ib_path);
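Callers migrate to the single-path API as sketched here (id and rec are assumed to come from the caller's CM context):

	/* before: ret = rdma_set_ib_paths(id, &rec, 1); */
	ret = rdma_set_ib_path(id, &rec);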
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
@@ -2483,11 +2595,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
}
@@ -2510,26 +2618,14 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
-static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
- unsigned long supported_gids,
- enum ib_gid_type default_gid)
-{
- if ((network_type == RDMA_NETWORK_IPV4 ||
- network_type == RDMA_NETWORK_IPV6) &&
- test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
- return IB_GID_TYPE_ROCE_UDP_ENCAP;
-
- return default_gid;
-}
-
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
struct rdma_addr *addr = &route->addr;
struct cma_work *work;
int ret;
- struct net_device *ndev = NULL;
- enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ struct net_device *ndev;
+
u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
@@ -2539,9 +2635,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
-
route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
@@ -2550,42 +2643,17 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->num_paths = 1;
- if (addr->dev_addr.bound_dev_if) {
- unsigned long supported_gids;
-
- ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
- if (!ndev) {
- ret = -ENODEV;
- goto err2;
- }
-
- supported_gids = roce_gid_type_mask_support(id_priv->id.device,
- id_priv->id.port_num);
- gid_type = cma_route_gid_type(addr->dev_addr.network,
- supported_gids,
- id_priv->gid_type);
- route->path_rec->rec_type =
- sa_conv_gid_to_pathrec_type(gid_type);
- sa_path_set_ndev(route->path_rec, &init_net);
- sa_path_set_ifindex(route->path_rec, ndev->ifindex);
- }
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
if (!ndev) {
ret = -ENODEV;
goto err2;
}
- sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
-
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&route->path_rec->sgid);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
&route->path_rec->dgid);
- /* Use the hint from IP Stack to select GID Type */
- if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
- gid_type = ib_network_to_gid_type(addr->dev_addr.network);
- route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
-
if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
/* TODO: get the hoplimit from the inet/inet6 device */
route->path_rec->hop_limit = addr->dev_addr.hoplimit;
@@ -2607,11 +2675,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
- work->event.status = 0;
-
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -2791,11 +2855,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -2821,11 +2881,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -3404,9 +3460,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = ret;
break;
}
- ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
- id_priv->id.route.path_rec,
- &event.param.ud.ah_attr);
+ ib_init_ah_attr_from_path(id_priv->id.device,
+ id_priv->id.port_num,
+ id_priv->id.route.path_rec,
+ &event.param.ud.ah_attr);
event.param.ud.qp_num = rep->qpn;
event.param.ud.qkey = rep->qkey;
event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -3873,7 +3930,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
struct rdma_dev_addr *dev_addr =
&id_priv->id.route.addr.dev_addr;
struct net_device *ndev =
- dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
enum ib_gid_type gid_type =
id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
@@ -4010,8 +4067,10 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
} else if (addr->sa_family == AF_INET6) {
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else {
- mgid->raw[0] = (gid_type == IB_GID_TYPE_IB) ? 0xff : 0;
- mgid->raw[1] = (gid_type == IB_GID_TYPE_IB) ? 0x0e : 0;
+ mgid->raw[0] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
+ mgid->raw[1] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
mgid->raw[2] = 0;
mgid->raw[3] = 0;
mgid->raw[4] = 0;
@@ -4061,7 +4120,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev) {
err = -ENODEV;
goto out2;
@@ -4179,7 +4238,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
struct net_device *ndev = NULL;
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net,
+ ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
if (ndev) {
cma_igmp_send(ndev,
@@ -4235,7 +4294,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
if (event != NETDEV_BONDING_FAILOVER)
return NOTIFY_DONE;
- if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
+ if (!netif_is_bond_master(ndev))
return NOTIFY_DONE;
mutex_lock(&lock);
@@ -4432,7 +4491,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
goto out;
if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
+ rdma_addr_size(cma_dst_addr(id_priv)),
cma_dst_addr(id_priv),
RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
goto out;
@@ -4444,6 +4503,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
id_stats->qp_type = id->qp_type;
i_id++;
+ nlmsg_end(skb, nlh);
}
cb->args[1] = 0;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 31dfee0c8295..eee38b40be99 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -295,7 +295,7 @@ static struct config_group *make_cma_dev(struct config_group *group,
goto fail;
}
- strncpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+ strlcpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 66f0268f37a6..c4560d84dfae 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -40,8 +40,12 @@
#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
+#include <rdma/restrack.h>
#include "mad_priv.h"
+/* Total number of ports combined across all struct ib_device instances */
+#define RDMA_MAX_PORTS 1024
+
struct pkey_index_qp_list {
struct list_head pkey_index_list;
u16 pkey_index;
@@ -137,7 +141,6 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);
-int roce_rescan_device(struct ib_device *ib_dev);
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
int ib_cache_setup_one(struct ib_device *device);
@@ -191,13 +194,6 @@ void ib_sa_cleanup(void);
int rdma_nl_init(void);
void rdma_nl_exit(void);
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
@@ -213,11 +209,6 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec);
-
void ib_security_destroy_port_pkey_list(struct ib_device *device);
void ib_security_cache_change(struct ib_device *device,
@@ -240,14 +231,6 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
-static inline int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
-{
- return 0;
-}
-
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}
@@ -318,4 +301,31 @@ struct ib_device *ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
+
+static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ib_qp *qp;
+
+ qp = dev->create_qp(pd, attr, udata);
+ if (IS_ERR(qp))
+ return qp;
+
+ qp->device = dev;
+ qp->pd = pd;
+ /*
+ * We don't track XRC QPs for now, because they don't have PD
+ * and more importantly they are created internaly by driver,
+ * see mlx5 create_dev_resources() as an example.
+ */
+ if (attr->qp_type < IB_QPT_XRC_INI) {
+ qp->res.type = RDMA_RESTRACK_QP;
+ rdma_restrack_add(&qp->res);
+ } else
+ qp->res.valid = false;
+
+ return qp;
+}
#endif /* _CORE_PRIV_H */
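An illustrative call site for the wrapper above; core code would create QPs through it so the QP is registered with restrack (dev, pd, attr and udata assumed from the verbs path):

	qp = _ib_create_qp(dev, pd, &attr, udata);
	if (IS_ERR(qp))
		return PTR_ERR(qp);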
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index f2ae75fa3128..bc79ca8215d7 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -25,9 +25,10 @@
#define IB_POLL_FLAGS \
(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
-static int __ib_process_cq(struct ib_cq *cq, int budget)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
{
int i, n, completed = 0;
+ struct ib_wc *wcs = poll_wc ? : cq->wc;
/*
* budget might be (-1) if the caller does not
@@ -35,9 +36,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* minimum here.
*/
while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
- budget - completed), cq->wc)) > 0) {
+ budget - completed), wcs)) > 0) {
for (i = 0; i < n; i++) {
- struct ib_wc *wc = &cq->wc[i];
+ struct ib_wc *wc = &wcs[i];
if (wc->wr_cqe)
wc->wr_cqe->done(cq, wc);
@@ -60,18 +61,20 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* @cq: CQ to process
* @budget: number of CQEs to poll for
*
- * This function is used to process all outstanding CQ entries on a
- * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
- * context and does not ask for completion interrupts from the HCA.
+ * This function is used to process all outstanding CQ entries.
+ * It does not offload CQ processing to a different context and does
+ * not ask for completion interrupts from the HCA.
+ * Using direct processing on a CQ whose poll context is not
+ * IB_POLL_DIRECT may trigger concurrent processing.
*
* Note: do not pass -1 as %budget unless it is guaranteed that the number
* of completions that will be processed is small.
*/
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
- WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);
+ struct ib_wc wcs[IB_POLL_BATCH];
- return __ib_process_cq(cq, budget);
+ return __ib_process_cq(cq, budget, wcs);
}
EXPORT_SYMBOL(ib_process_cq_direct);
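An illustrative direct poll; with the on-stack completion array above this is safe on any CQ type (cq and the budget of 16 are assumptions):

	/* process at most 16 CQEs synchronously in the caller's context */
	int n = ib_process_cq_direct(cq, 16);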
@@ -85,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
int completed;
- completed = __ib_process_cq(cq, budget);
+ completed = __ib_process_cq(cq, budget, NULL);
if (completed < budget) {
irq_poll_complete(&cq->iop);
if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -105,7 +108,7 @@ static void ib_cq_poll_work(struct work_struct *work)
struct ib_cq *cq = container_of(work, struct ib_cq, work);
int completed;
- completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
+ completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
queue_work(ib_comp_wq, &cq->work);
@@ -117,20 +120,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
}
/**
- * ib_alloc_cq - allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
* @dev: device to allocate the CQ for
* @private: driver private data, accessible from cq->cq_context
* @nr_cqe: number of CQEs to allocate
* @comp_vector: HCA completion vectors for this CQ
* @poll_ctx: context to poll the CQ from.
+ * @caller: module owner name.
*
* This is the proper interface to allocate a CQ for in-kernel users. A
* CQ allocated with this interface will automatically be polled from the
* specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
* to use this CQ abstraction.
*/
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller)
{
struct ib_cq_init_attr cq_attr = {
.cqe = nr_cqe,
@@ -154,6 +159,10 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
if (!cq->wc)
goto out_destroy_cq;
+ cq->res.type = RDMA_RESTRACK_CQ;
+ cq->res.kern_name = caller;
+ rdma_restrack_add(&cq->res);
+
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
cq->comp_handler = ib_cq_completion_direct;
@@ -178,11 +187,12 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
out_free_wc:
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
out_destroy_cq:
cq->device->destroy_cq(cq);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_alloc_cq);
+EXPORT_SYMBOL(__ib_alloc_cq);
/**
* ib_free_cq - free a completion queue
@@ -209,6 +219,7 @@ void ib_free_cq(struct ib_cq *cq)
}
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
ret = cq->device->destroy_cq(cq);
WARN_ON_ONCE(ret);
}
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 465520627e4b..e8010e73a1cf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -263,6 +263,8 @@ struct ib_device *ib_alloc_device(size_t size)
if (!device)
return NULL;
+ rdma_restrack_init(&device->res);
+
device->dev.class = &ib_class;
device_initialize(&device->dev);
@@ -288,7 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
{
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
- kobject_put(&device->dev.kobj);
+ put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
@@ -462,7 +464,6 @@ int ib_register_device(struct ib_device *device,
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
struct device *parent = device->dev.parent;
- WARN_ON_ONCE(!parent);
WARN_ON_ONCE(device->dma_device);
if (device->dev.dma_ops) {
/*
@@ -471,16 +472,25 @@ int ib_register_device(struct ib_device *device,
* into device->dev.
*/
device->dma_device = &device->dev;
- if (!device->dev.dma_mask)
- device->dev.dma_mask = parent->dma_mask;
- if (!device->dev.coherent_dma_mask)
- device->dev.coherent_dma_mask =
- parent->coherent_dma_mask;
+ if (!device->dev.dma_mask) {
+ if (parent)
+ device->dev.dma_mask = parent->dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
+ if (!device->dev.coherent_dma_mask) {
+ if (parent)
+ device->dev.coherent_dma_mask =
+ parent->coherent_dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
} else {
/*
* The caller did not provide custom DMA operations. Use the
* DMA mapping operations of the parent device.
*/
+ WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
@@ -588,6 +598,8 @@ void ib_unregister_device(struct ib_device *device)
}
up_read(&lists_rwsem);
+ rdma_restrack_clean(&device->res);
+
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
@@ -1033,32 +1045,22 @@ EXPORT_SYMBOL(ib_modify_port);
/**
* ib_find_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
+ * a specified GID value occurs. It searches only for the IB link layer.
* @device: The device to query.
* @gid: The GID value to search for.
- * @gid_type: Type of GID.
* @ndev: The ndev related to the GID to search for.
* @port_num: The port number of the device where the GID value was found.
* @index: The index into the GID table where the GID was found. This
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index)
+ struct net_device *ndev, u8 *port_num, u16 *index)
{
union ib_gid tmp_gid;
int ret, port, i;
for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
- if (rdma_cap_roce_gid_table(device, port)) {
- if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
- ndev, index)) {
- *port_num = port;
- return 0;
- }
- }
-
- if (gid_type != IB_GID_TYPE_IB)
+ if (rdma_cap_roce_gid_table(device, port))
continue;
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 84d2615b5d4b..a0a9ed719031 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -388,13 +388,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
EXPORT_SYMBOL(ib_flush_fmr_pool);
/**
- * ib_fmr_pool_map_phys -
- * @pool:FMR pool to allocate FMR from
- * @page_list:List of pages to map
- * @list_len:Number of pages in @page_list
- * @io_virtual_address:I/O virtual address for new FMR
- *
- * Map an FMR from an FMR pool.
+ * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
+ * @pool_handle: FMR pool to allocate FMR from
+ * @page_list: List of pages to map
+ * @list_len: Number of pages in @page_list
+ * @io_virtual_address: I/O virtual address for new FMR
*/
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
u64 *page_list,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 3c4faadb8cdd..81528f64061a 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -654,6 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cb91245e9163..c50596f7f98a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -49,7 +49,6 @@
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
-#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 1fb72c356e36..3ccaae18ad75 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -41,8 +41,6 @@
#include <linux/module.h>
#include "core_priv.h"
-#include "core_priv.h"
-
static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
static struct {
@@ -83,15 +81,13 @@ static bool is_nl_valid(unsigned int type, unsigned int op)
if (!is_nl_msg_valid(type, op))
return false;
- cb_table = rdma_nl_types[type].cb_table;
-#ifdef CONFIG_MODULES
- if (!cb_table) {
+ if (!rdma_nl_types[type].cb_table) {
mutex_unlock(&rdma_nl_mutex);
request_module("rdma-netlink-subsys-%d", type);
mutex_lock(&rdma_nl_mutex);
- cb_table = rdma_nl_types[type].cb_table;
}
-#endif
+
+ cb_table = rdma_nl_types[type].cb_table;
if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
return false;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0dcd1aa6f683..fa8655e3b3ed 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -31,6 +31,8 @@
*/
#include <linux/module.h>
+#include <linux/pid.h>
+#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>
@@ -52,16 +54,42 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
+ .len = 16 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
+ .len = TASK_COMM_LEN },
};
-static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
- char fw[IB_FW_VERSION_NAME_MAX];
-
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
return -EMSGSIZE;
if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
return -EMSGSIZE;
+
+ return 0;
+}
+
+static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+{
+ char fw[IB_FW_VERSION_NAME_MAX];
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
return -EMSGSIZE;
@@ -92,10 +120,9 @@ static int fill_port_info(struct sk_buff *msg,
struct ib_port_attr attr;
int ret;
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
- return -EMSGSIZE;
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
return -EMSGSIZE;
@@ -126,6 +153,137 @@ static int fill_port_info(struct sk_buff *msg,
return 0;
}
+static int fill_res_info_entry(struct sk_buff *msg,
+ const char *name, u64 curr)
+{
+ struct nlattr *entry_attr;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
+ goto err;
+ if (nla_put_u64_64bit(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
+ goto err;
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
+{
+ static const char * const names[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_PD] = "pd",
+ [RDMA_RESTRACK_CQ] = "cq",
+ [RDMA_RESTRACK_QP] = "qp",
+ };
+
+ struct rdma_restrack_root *res = &device->res;
+ struct nlattr *table_attr;
+ int ret, i, curr;
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
+ if (!names[i])
+ continue;
+ curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
+ ret = fill_res_info_entry(msg, names[i], curr);
+ if (ret)
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return ret;
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg,
+ struct ib_qp *qp, uint32_t port)
+{
+ struct rdma_restrack_entry *res = &qp->res;
+ struct ib_qp_init_attr qp_init_attr;
+ struct nlattr *entry_attr;
+ struct ib_qp_attr qp_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
+ if (ret)
+ return ret;
+
+ if (port && port != qp_attr.port_num)
+ return 0;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ goto out;
+
+ /* In create_qp() port is not set yet */
+ if (qp_attr.port_num &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
+ goto err;
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
+ qp_attr.dest_qp_num))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
+ qp_attr.rq_psn))
+ goto err;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
+ goto err;
+
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
+ qp_attr.path_mig_state))
+ goto err;
+ }
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
+ goto err;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
+ goto err;
+
+ /*
+ * The existence of a task means this is a user QP: report the
+ * PID and let the netlink user read /proc/PID/comm for the task
+ * name; res->kern_name is NULL in that case. Kernel-owned
+ * resources carry kern_name instead.
+ */
+ if (rdma_is_kernel_res(res)) {
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, res->kern_name))
+ goto err;
+ } else {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, task_pid_vnr(res->task)))
+ goto err;
+ }
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+out:
+ return -EMSGSIZE;
+}
+
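
A consequence of the kern_name/PID split just above is that a netlink consumer never receives a name for user-owned QPs, only RDMA_NLDEV_ATTR_RES_PID; the task name is expected to be resolved from /proc/PID/comm. A hedged userspace-side sketch of that lookup, plain POSIX C and not part of this patch:

#include <stdio.h>
#include <string.h>

/* Resolve a task name from the PID reported in
 * RDMA_NLDEV_ATTR_RES_PID. Illustrative only.
 */
static int pid_to_comm(unsigned int pid, char *buf, size_t len)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%u/comm", pid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	return 0;
}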
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -321,6 +479,213 @@ out:
return skb->len;
}
+static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, 0);
+
+ ret = fill_res_info(msg, device);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ put_device(&device->dev);
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return ret;
+}
+
+static int _nldev_res_get_dumpit(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx)
+{
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+
+ if (idx < start)
+ return 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_res_info(skb, device)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+ idx++;
+
+out:
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_res_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
+}
+
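
_nldev_res_get_dumpit() leans on the standard netlink dump contract: cb->args[] persists across the successive recv() calls that make up one dump, so the handler records its position, skips already-delivered entries on re-entry, and ends the dump by adding nothing to the skb. A generic sketch of the idiom; nentries and fill_one() are placeholders, not part of this patch:

/* nentries and fill_one() stand in for the caller's data source. */
static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long idx, start = cb->args[0];

	for (idx = start; idx < nentries; idx++) {
		if (fill_one(skb, idx) < 0)
			break;		/* skb full, resume here on the next call */
	}
	cb->args[0] = idx;		/* remember progress across calls */
	return skb->len;		/* 0 (nothing added) ends the dump */
}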
+static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct rdma_restrack_entry *res;
+ int err, ret = 0, idx = 0;
+ struct nlattr *table_attr;
+ struct ib_device *device;
+ int start = cb->args[0];
+ struct ib_qp *qp = NULL;
+ struct nlmsghdr *nlh;
+ u32 index, port = 0;
+
+ err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
+ /*
+ * Right now we require the device index to get QP information,
+ * but this code could be extended to return all devices in one
+ * shot by checking for the presence of RDMA_NLDEV_ATTR_DEV_INDEX:
+ * if it is absent, iterate over all devices. That is not needed
+ * for now.
+ */
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ /*
+ * If no PORT_INDEX is supplied, we will return all QPs from that device
+ */
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err_index;
+ }
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_QP_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_nldev_handle(skb, device)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ table_attr = nla_nest_start(skb, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ down_read(&device->res.rwsem);
+ hash_for_each_possible(device->res.hash, res, node, RDMA_RESTRACK_QP) {
+ if (idx < start)
+ goto next;
+
+ if ((rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != &init_pid_ns) ||
+ (!rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != task_active_pid_ns(res->task)))
+ /*
+ * 1. Kernel QPs should be visible in the init namespace only.
+ * 2. Present only QPs visible in the current namespace.
+ */
+ goto next;
+
+ if (!rdma_restrack_get(res))
+ /*
+ * The resource is in the process of being
+ * released; we are not dropping the lock here,
+ * so skip it. It is freed once we have moved
+ * past it and read its ->next pointer.
+ */
+ goto next;
+
+ qp = container_of(res, struct ib_qp, res);
+
+ up_read(&device->res.rwsem);
+ ret = fill_res_qp_entry(skb, qp, port);
+ down_read(&device->res.rwsem);
+ /*
+ * Return the reference. The resource is not actually
+ * released until &device->res.rwsem is taken for write.
+ */
+ rdma_restrack_put(res);
+
+ if (ret == -EMSGSIZE)
+ /*
+ * There is a chance to optimize here.
+ * It can be done by using list_prepare_entry
+ * and list_for_each_entry_continue afterwards.
+ */
+ break;
+ if (ret)
+ goto res_err;
+next: idx++;
+ }
+ up_read(&device->res.rwsem);
+
+ nla_nest_end(skb, table_attr);
+ nlmsg_end(skb, nlh);
+ cb->args[0] = idx;
+
+ /*
+ * No more QPs to fill: cancel the message and
+ * return 0 to mark the end of the dump.
+ */
+ if (!qp)
+ goto err;
+
+ put_device(&device->dev);
+ return skb->len;
+
+res_err:
+ nla_nest_cancel(skb, table_attr);
+ up_read(&device->res.rwsem);
+
+err:
+ nlmsg_cancel(skb, nlh);
+
+err_index:
+ put_device(&device->dev);
+ return ret;
+}
+
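
The dump loop above combines two idioms worth separating out: rdma_restrack_get() (kref_get_unless_zero() underneath) skips entries that are already on their way out, and the rwsem is dropped around the fill because building netlink attributes can sleep; the held reference is what keeps the entry, and therefore the iterator's ->next linkage, valid while the lock is not held. Condensed, with fill_entry() as a placeholder:

down_read(&res->rwsem);
hash_for_each_possible(res->hash, e, node, type) {
	if (!rdma_restrack_get(e))
		continue;		/* entry already being released */
	up_read(&res->rwsem);		/* the fill may sleep */
	err = fill_entry(skb, e);
	down_read(&res->rwsem);
	rdma_restrack_put(e);
	if (err)
		break;
}
up_read(&res->rwsem);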
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
@@ -330,6 +695,23 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
},
+ [RDMA_NLDEV_CMD_RES_GET] = {
+ .doit = nldev_res_get_doit,
+ .dump = nldev_res_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_QP_GET] = {
+ .dump = nldev_res_get_qp_dumpit,
+ /*
+ * .doit is not implemented yet for two reasons:
+ * 1. It is not needed yet.
+ * 2. .doit requires a unique identifier. That is easy for
+ * QPs (device index + port index + LQPN) but not for the
+ * rest of the resources (PD and CQ). Since it is better to
+ * provide a uniform interface for all resources, wait until
+ * the other resources are implemented too.
+ */
+ },
};
void __init nldev_init(void)
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
new file mode 100644
index 000000000000..857637bf46da
--- /dev/null
+++ b/drivers/infiniband/core/restrack.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
+#include <linux/mutex.h>
+#include <linux/sched/task.h>
+#include <linux/uaccess.h>
+#include <linux/pid_namespace.h>
+
+void rdma_restrack_init(struct rdma_restrack_root *res)
+{
+ init_rwsem(&res->rwsem);
+}
+
+void rdma_restrack_clean(struct rdma_restrack_root *res)
+{
+ WARN_ON_ONCE(!hash_empty(res->hash));
+}
+
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns)
+{
+ struct rdma_restrack_entry *e;
+ u32 cnt = 0;
+
+ down_read(&res->rwsem);
+ hash_for_each_possible(res->hash, e, node, type) {
+ if (ns == &init_pid_ns ||
+ (!rdma_is_kernel_res(e) &&
+ ns == task_active_pid_ns(e->task)))
+ cnt++;
+ }
+ up_read(&res->rwsem);
+ return cnt;
+}
+EXPORT_SYMBOL(rdma_restrack_count);
+
+static void set_kern_name(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_qp *qp;
+
+ if (type != RDMA_RESTRACK_QP)
+ /* PD and CQ types already have this name embedded in them */
+ return;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->pd) {
+ WARN_ONCE(true, "XRC QPs are not supported\n");
+ /* Survive, despite the programmer's error */
+ res->kern_name = " ";
+ return;
+ }
+
+ res->kern_name = qp->pd->res.kern_name;
+}
+
+static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_device *dev;
+ struct ib_xrcd *xrcd;
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+
+ switch (type) {
+ case RDMA_RESTRACK_PD:
+ pd = container_of(res, struct ib_pd, res);
+ dev = pd->device;
+ break;
+ case RDMA_RESTRACK_CQ:
+ cq = container_of(res, struct ib_cq, res);
+ dev = cq->device;
+ break;
+ case RDMA_RESTRACK_QP:
+ qp = container_of(res, struct ib_qp, res);
+ dev = qp->device;
+ break;
+ case RDMA_RESTRACK_XRCD:
+ xrcd = container_of(res, struct ib_xrcd, res);
+ dev = xrcd->device;
+ break;
+ default:
+ WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
+ return NULL;
+ }
+
+ return dev;
+}
+
+void rdma_restrack_add(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev = res_to_dev(res);
+
+ if (!dev)
+ return;
+
+ if (!uaccess_kernel()) {
+ get_task_struct(current);
+ res->task = current;
+ res->kern_name = NULL;
+ } else {
+ set_kern_name(res);
+ res->task = NULL;
+ }
+
+ kref_init(&res->kref);
+ init_completion(&res->comp);
+ res->valid = true;
+
+ down_write(&dev->res.rwsem);
+ hash_add(dev->res.hash, &res->node, res->type);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_add);
+
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res)
+{
+ return kref_get_unless_zero(&res->kref);
+}
+EXPORT_SYMBOL(rdma_restrack_get);
+
+static void restrack_release(struct kref *kref)
+{
+ struct rdma_restrack_entry *res;
+
+ res = container_of(kref, struct rdma_restrack_entry, kref);
+ complete(&res->comp);
+}
+
+int rdma_restrack_put(struct rdma_restrack_entry *res)
+{
+ return kref_put(&res->kref, restrack_release);
+}
+EXPORT_SYMBOL(rdma_restrack_put);
+
+void rdma_restrack_del(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev;
+
+ if (!res->valid)
+ return;
+
+ dev = res_to_dev(res);
+ if (!dev)
+ return;
+
+ rdma_restrack_put(res);
+
+ wait_for_completion(&res->comp);
+
+ down_write(&dev->res.rwsem);
+ hash_del(&res->node);
+ res->valid = false;
+ if (res->task)
+ put_task_struct(res->task);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_del);
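
Taken together, restrack.c gives each tracked object a three-stage lifecycle: rdma_restrack_add() publishes it with a kref of one, dumpers pin it with rdma_restrack_get()/rdma_restrack_put(), and rdma_restrack_del() drops the initial reference and sleeps on the completion until the last reader is gone before unhashing the entry. A sketch of the producer side, mirroring the PD/CQ/QP call sites added elsewhere in this patch; my_pd stands in for any object embedding a struct rdma_restrack_entry:

my_pd->res.type = RDMA_RESTRACK_PD;
rdma_restrack_add(&my_pd->res);		/* now visible to nldev dumps */

/* ... object in use; dumpers may hold get/put references ... */

rdma_restrack_del(&my_pd->res);		/* blocks until the last reader drops */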
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 90e3889b7fbe..5a52ec77940a 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -410,15 +410,18 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
rtnl_unlock();
}
-/* This function will rescan all of the network devices in the system
- * and add their gids, as needed, to the relevant RoCE devices. */
-int roce_rescan_device(struct ib_device *ib_dev)
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @ib_dev: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
enum_all_gids_of_dev_cb, NULL);
-
- return 0;
}
+EXPORT_SYMBOL(rdma_roce_rescan_device);
static void callback_for_addr_gid_device_scan(struct ib_device *device,
u8 port,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index ab5e1024fea9..8cf15d4a8ac4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1227,9 +1227,9 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr)
{
int ret;
u16 gid_index;
@@ -1341,10 +1341,11 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_path);
+EXPORT_SYMBOL(ib_init_ah_attr_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
spin_lock_irqsave(&query->port->ah_lock, flags);
@@ -1356,6 +1357,15 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->sm_ah = query->port->sm_ah;
spin_unlock_irqrestore(&query->port->ah_lock, flags);
+ /*
+ * Always check if sm_ah has valid dlid assigned,
+ * before querying for class port info
+ */
+ if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
+ !rdma_is_valid_unicast_lid(&ah_attr)) {
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+ return -EAGAIN;
+ }
query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 59b2f96d986a..b61dda6b04fc 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -653,12 +653,11 @@ int ib_security_modify_qp(struct ib_qp *qp,
}
return ret;
}
-EXPORT_SYMBOL(ib_security_modify_qp);
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
+static int ib_security_pkey_access(struct ib_device *dev,
+ u8 port_num,
+ u16 pkey_index,
+ void *sec)
{
u64 subnet_prefix;
u16 pkey;
@@ -678,7 +677,6 @@ int ib_security_pkey_access(struct ib_device *dev,
return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
-EXPORT_SYMBOL(ib_security_pkey_access);
static int ib_mad_agent_security_change(struct notifier_block *nb,
unsigned long event,
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index e30d86fa1855..8ae1308eecc7 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1276,7 +1276,6 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- WARN_ON_ONCE(!device->dev.parent);
ret = dev_set_name(class_dev, "%s", device->name);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f2a7f62c2834..8ae636bb09e5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -53,6 +53,8 @@
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Libor Michalek");
MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
MODULE_LICENSE("Dual BSD/GPL");
@@ -104,10 +106,13 @@ struct ib_ucm_event {
enum {
IB_UCM_MAJOR = 231,
IB_UCM_BASE_MINOR = 224,
- IB_UCM_MAX_DEVICES = 32
+ IB_UCM_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UCM_NUM_FIXED_MINOR = 32,
+ IB_UCM_NUM_DYNAMIC_MINOR = IB_UCM_MAX_DEVICES - IB_UCM_NUM_FIXED_MINOR,
};
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
+static dev_t dynamic_ucm_dev;
static void ib_ucm_add_one(struct ib_device *device);
static void ib_ucm_remove_one(struct ib_device *device, void *client_data);
@@ -1130,11 +1135,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
return result;
}
-static unsigned int ib_ucm_poll(struct file *filp,
+static __poll_t ib_ucm_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_ucm_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
@@ -1199,7 +1204,6 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0;
}
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
@@ -1210,10 +1214,7 @@ static void ib_ucm_release_dev(struct device *dev)
static void ib_ucm_free_dev(struct ib_ucm_device *ucm_dev)
{
- if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
- clear_bit(ucm_dev->devnum, dev_map);
- else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
+ clear_bit(ucm_dev->devnum, dev_map);
}
static const struct file_operations ucm_fops = {
@@ -1235,27 +1236,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
-static dev_t overflow_maj;
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
- "infiniband_cm");
- if (ret) {
- pr_err("ucm: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
- if (ret >= IB_UCM_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_ucm_add_one(struct ib_device *device)
{
int devnum;
@@ -1274,19 +1254,14 @@ static void ib_ucm_add_one(struct ib_device *device)
ucm_dev->dev.release = ib_ucm_release_dev;
devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (devnum >= IB_UCM_MAX_DEVICES) {
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- ucm_dev->devnum = devnum;
- base = devnum + IB_UCM_BASE_DEV;
- set_bit(devnum, dev_map);
- }
+ if (devnum >= IB_UCM_MAX_DEVICES)
+ goto err;
+ ucm_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UCM_NUM_FIXED_MINOR)
+ base = dynamic_ucm_dev + devnum - IB_UCM_NUM_FIXED_MINOR;
+ else
+ base = IB_UCM_BASE_DEV + devnum;
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
@@ -1334,13 +1309,20 @@ static int __init ib_ucm_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR,
"infiniband_cm");
if (ret) {
pr_err("ucm: couldn't register device number\n");
goto error1;
}
+ ret = alloc_chrdev_region(&dynamic_ucm_dev, 0, IB_UCM_NUM_DYNAMIC_MINOR,
+ "infiniband_cm");
+ if (ret) {
+ pr_err("ucm: couldn't register dynamic device number\n");
+ goto err_alloc;
+ }
+
ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
if (ret) {
pr_err("ucm: couldn't create abi_version attribute\n");
@@ -1357,7 +1339,9 @@ static int __init ib_ucm_init(void)
error3:
class_remove_file(&cm_class, &class_attr_abi_version.attr);
error2:
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
+err_alloc:
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
error1:
return ret;
}
@@ -1366,9 +1350,8 @@ static void __exit ib_ucm_cleanup(void)
{
ib_unregister_client(&ucm_client);
class_remove_file(&cm_class, &class_attr_abi_version.attr);
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
idr_destroy(&ctx_id_table);
}
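
The overflow_maj machinery removed above is replaced by a split minor space: the first IB_UCM_NUM_FIXED_MINOR (32) devices keep their historical dev_t values under IB_UCM_BASE_DEV, so existing udev rules keep working, and everything beyond that comes from the region allocated in ib_ucm_init(). Worked through ib_ucm_add_one(): devnum 5 maps to IB_UCM_BASE_DEV + 5, i.e. MKDEV(231, 229), while devnum 32 is the first dynamic device at dynamic_ucm_dev + 0. The same mapping, written out as a sketch:

/* Mapping sketch, equivalent to the branch in ib_ucm_add_one(). */
static dev_t ucm_devnum_to_devt(int devnum)
{
	if (devnum < IB_UCM_NUM_FIXED_MINOR)
		return IB_UCM_BASE_DEV + devnum;	/* MKDEV(231, 224 + devnum) */
	return dynamic_ucm_dev + devnum - IB_UCM_NUM_FIXED_MINOR;
}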
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index eb85b546e223..6ba4231f2b07 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,13 +904,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
IB_PATH_BIDIRECTIONAL;
- if (rec->rec_type == SA_PATH_REC_TYPE_IB) {
- ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
- } else {
+ if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
struct sa_path_rec ib;
sa_convert_path_opa_to_ib(&ib, rec);
ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
+
+ } else {
+ ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
}
}
@@ -943,8 +944,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
+ NULL);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.src_addr);
}
@@ -956,8 +957,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, NULL,
+ (union ib_gid *)&addr->sib_addr);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.dst_addr);
}
@@ -1231,9 +1232,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;
sa_convert_path_ib_to_opa(&opa, &sa_path);
- ret = rdma_set_ib_paths(ctx->cm_id, &opa, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
} else {
- ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
}
if (ret)
return ret;
@@ -1630,10 +1631,10 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ucma_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 130606c3b07c..9a4e899d94b3 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -352,7 +352,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 4b64dd02e090..78c77962422e 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -55,16 +55,21 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
- IB_UMAD_MAX_PORTS = 64,
+ IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
IB_UMAD_MAX_AGENTS = 32,
IB_UMAD_MAJOR = 231,
- IB_UMAD_MINOR_BASE = 0
+ IB_UMAD_MINOR_BASE = 0,
+ IB_UMAD_NUM_FIXED_MINOR = 64,
+ IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
+ IB_ISSM_MINOR_BASE = IB_UMAD_NUM_FIXED_MINOR,
};
/*
@@ -127,9 +132,12 @@ struct ib_umad_packet {
static struct class *umad_class;
-static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
+ IB_UMAD_NUM_FIXED_MINOR;
+static dev_t dynamic_umad_dev;
+static dev_t dynamic_issm_dev;
-static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
@@ -233,8 +241,7 @@ static void recv_handler(struct ib_mad_agent *agent,
* On OPA devices it is okay to lose the upper 16 bits of LID as this
* information is obtained elsewhere. Mask off the upper 16 bits.
*/
- if (agent->device->port_immutable[agent->port_num].core_cap_flags &
- RDMA_CORE_PORT_INTEL_OPA)
+ if (rdma_cap_opa_mad(agent->device, agent->port_num))
packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
mad_recv_wc->wc->slid);
else
@@ -246,10 +253,14 @@ static void recv_handler(struct ib_mad_agent *agent,
if (packet->mad.hdr.grh_present) {
struct rdma_ah_attr ah_attr;
const struct ib_global_route *grh;
+ int ret;
- ib_init_ah_from_wc(agent->device, agent->port_num,
- mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
- &ah_attr);
+ ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
+ mad_recv_wc->wc,
+ mad_recv_wc->recv_buf.grh,
+ &ah_attr);
+ if (ret)
+ goto err2;
grh = rdma_ah_read_grh(&ah_attr);
packet->mad.hdr.gid_index = grh->sgid_index;
@@ -500,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
}
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.type = rdma_ah_find_type(file->port->ib_dev,
+ ah_attr.type = rdma_ah_find_type(agent->device,
file->port->port_num);
rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
@@ -617,12 +628,12 @@ err:
return ret;
}
-static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ib_umad_file *file = filp->private_data;
/* we will always be able to post a MAD send */
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(filp, &file->recv_wait, wait);
@@ -1139,54 +1150,26 @@ static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_MAD_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
-static int find_overflow_devnum(struct ib_device *device)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
- "infiniband_mad");
- if (ret) {
- dev_err(&device->dev,
- "couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
- if (ret >= IB_UMAD_MAX_PORTS)
- return -1;
-
- return ret;
-}
-
static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_device *umad_dev,
struct ib_umad_port *port)
{
int devnum;
- dev_t base;
+ dev_t base_umad;
+ dev_t base_issm;
- spin_lock(&port_lock);
devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS) {
- spin_unlock(&port_lock);
- devnum = find_overflow_devnum(device);
- if (devnum < 0)
- return -1;
-
- spin_lock(&port_lock);
- port->dev_num = devnum + IB_UMAD_MAX_PORTS;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
+ if (devnum >= IB_UMAD_MAX_PORTS)
+ return -1;
+ port->dev_num = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
+ base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
+ base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
} else {
- port->dev_num = devnum;
- base = devnum + base_dev;
- set_bit(devnum, dev_map);
+ base_umad = devnum + base_umad_dev;
+ base_issm = devnum + base_issm_dev;
}
- spin_unlock(&port_lock);
port->ib_dev = device;
port->port_num = port_num;
@@ -1198,7 +1181,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
port->cdev.owner = THIS_MODULE;
cdev_set_parent(&port->cdev, &umad_dev->kobj);
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
- if (cdev_add(&port->cdev, base, 1))
+ if (cdev_add(&port->cdev, base_umad, 1))
goto err_cdev;
port->dev = device_create(umad_class, device->dev.parent,
@@ -1212,12 +1195,11 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->dev, &dev_attr_port))
goto err_dev;
- base += IB_UMAD_MAX_PORTS;
cdev_init(&port->sm_cdev, &umad_sm_fops);
port->sm_cdev.owner = THIS_MODULE;
cdev_set_parent(&port->sm_cdev, &umad_dev->kobj);
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
- if (cdev_add(&port->sm_cdev, base, 1))
+ if (cdev_add(&port->sm_cdev, base_issm, 1))
goto err_sm_cdev;
port->sm_dev = device_create(umad_class, device->dev.parent,
@@ -1244,10 +1226,7 @@ err_dev:
err_cdev:
cdev_del(&port->cdev);
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
return -1;
}
@@ -1281,11 +1260,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
}
mutex_unlock(&port->file_mutex);
-
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(port->dev_num, dev_map);
- else
- clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
+ clear_bit(port->dev_num, dev_map);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1361,13 +1336,23 @@ static int __init ib_umad_init(void)
{
int ret;
- ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
+ ret = register_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2,
"infiniband_mad");
if (ret) {
pr_err("couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2,
+ "infiniband_mad");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+ dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;
+
umad_class = class_create(THIS_MODULE, "infiniband_mad");
if (IS_ERR(umad_class)) {
ret = PTR_ERR(umad_class);
@@ -1395,7 +1380,12 @@ out_class:
class_destroy(umad_class);
out_chrdev:
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
+
+out_alloc:
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
out:
return ret;
@@ -1405,9 +1395,10 @@ static void __exit ib_umad_cleanup(void)
{
ib_unregister_client(&umad_client);
class_destroy(umad_class);
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}
module_init(ib_umad_init);
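
The umad conversion follows the same fixed-plus-dynamic scheme, but with paired nodes per port: the fixed region registered in ib_umad_init() spans IB_UMAD_NUM_FIXED_MINOR * 2 = 128 minors of major 231, umad0..umad63 at minors 0..63 and issm0..issm63 at 64..127 (base_issm_dev = base_umad_dev + 64). Ports past the 64th draw both character devices from the dynamically allocated region instead, with the issm half offset by IB_UMAD_NUM_DYNAMIC_MINOR as set up right after the alloc_chrdev_region() call.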
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 840b24096690..256934d1f64f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -340,6 +340,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
uobj->object = pd;
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
+ pd->res.type = RDMA_RESTRACK_PD;
+ rdma_restrack_add(&pd->res);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
@@ -1033,6 +1035,8 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
goto err_cb;
uobj_alloc_commit(&obj->uobject);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
return obj;
@@ -1145,10 +1149,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
min(ucore->inlen, sizeof(cmd)),
ib_uverbs_ex_create_cq_cb, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- return 0;
+ return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
@@ -1199,7 +1200,7 @@ static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
tmp.opcode = wc->opcode;
tmp.vendor_err = wc->vendor_err;
tmp.byte_len = wc->byte_len;
- tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
+ tmp.ex.imm_data = wc->ex.imm_data;
tmp.qp_num = wc->qp->qp_num;
tmp.src_qp = wc->src_qp;
tmp.wc_flags = wc->wc_flags;
@@ -1517,7 +1518,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = device->create_qp(pd, &attr, uhw);
+ qp = _ib_create_qp(device, pd, &attr, uhw);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
@@ -1530,7 +1531,6 @@ static int create_qp(struct ib_uverbs_file *file,
goto err_cb;
qp->real_qp = qp;
- qp->device = device;
qp->pd = pd;
qp->send_cq = attr.send_cq;
qp->recv_cq = attr.recv_cq;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 71ff2644e053..d96dc1d17be1 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -243,16 +243,13 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
size_t ctx_size;
uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
- if (hdr->reserved)
- return -EINVAL;
-
object_spec = uverbs_get_object(ib_dev, hdr->object_id);
if (!object_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
method_spec = uverbs_get_method(object_spec, hdr->method_id);
if (!method_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
return -EINVAL;
@@ -305,6 +302,16 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
file, method_spec, ctx->uverbs_attr_bundle);
+
+ /*
+ * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework
+ * cannot invoke the method because the request is not supported. No
+ * other cases should return this code.
+ */
+ if (unlikely(err == -EPROTONOSUPPORT)) {
+ WARN_ON_ONCE(err == -EPROTONOSUPPORT);
+ err = -EINVAL;
+ }
out:
if (ctx != (void *)data)
kfree(ctx);
@@ -341,7 +348,7 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (hdr.reserved) {
- err = -EOPNOTSUPP;
+ err = -EPROTONOSUPPORT;
goto out;
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 381fd9c096ae..5b811bf574d6 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -62,14 +62,16 @@ MODULE_LICENSE("Dual BSD/GPL");
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
- IB_UVERBS_MAX_DEVICES = 32
+ IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UVERBS_NUM_FIXED_MINOR = 32,
+ IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
+static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
-static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
@@ -339,11 +341,11 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
sizeof(struct ib_uverbs_comp_event_desc));
}
-static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
struct file *filp,
struct poll_table_struct *wait)
{
- unsigned int pollflags = 0;
+ __poll_t pollflags = 0;
poll_wait(filp, &ev_queue->poll_wait, wait);
@@ -355,13 +357,13 @@ static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
return pollflags;
}
-static unsigned int ib_uverbs_async_event_poll(struct file *filp,
+static __poll_t ib_uverbs_async_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
return ib_uverbs_event_poll(filp->private_data, filp, wait);
}
-static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
+static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_uverbs_completion_event_file *comp_ev_file =
@@ -1005,34 +1007,6 @@ static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
-
-/*
- * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
- * requesting a new major number and doubling the number of max devices we
- * support. It's stupid, but simple.
- */
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
- "infiniband_verbs");
- if (ret) {
- pr_err("user_verbs: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
- if (ret >= IB_UVERBS_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
@@ -1062,24 +1036,15 @@ static void ib_uverbs_add_one(struct ib_device *device)
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
- spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES) {
- spin_unlock(&map_lock);
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- spin_lock(&map_lock);
- uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- uverbs_dev->devnum = devnum;
- base = devnum + IB_UVERBS_BASE_DEV;
- set_bit(devnum, dev_map);
- }
- spin_unlock(&map_lock);
+ if (devnum >= IB_UVERBS_MAX_DEVICES)
+ goto err;
+ uverbs_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
+ base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
+ else
+ base = IB_UVERBS_BASE_DEV + devnum;
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
@@ -1124,10 +1089,7 @@ err_class:
err_cdev:
cdev_del(&uverbs_dev->cdev);
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
@@ -1219,11 +1181,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
dev_set_drvdata(uverbs_dev->dev, NULL);
device_destroy(uverbs_class, uverbs_dev->cdev.dev);
cdev_del(&uverbs_dev->cdev);
-
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(uverbs_dev->devnum, dev_map);
- else
- clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+ clear_bit(uverbs_dev->devnum, dev_map);
if (device->disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
@@ -1265,13 +1223,22 @@ static int __init ib_uverbs_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR,
"infiniband_verbs");
if (ret) {
pr_err("user_verbs: couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
+ IB_UVERBS_NUM_DYNAMIC_MINOR,
+ "infiniband_verbs");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+
uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
if (IS_ERR(uverbs_class)) {
ret = PTR_ERR(uverbs_class);
@@ -1299,7 +1266,12 @@ out_class:
class_destroy(uverbs_class);
out_chrdev:
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
+
+out_alloc:
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
out:
return ret;
@@ -1309,9 +1281,10 @@ static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
class_destroy(uverbs_class);
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
}
module_init(ib_uverbs_init);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index c3ee5d9b336d..b571176babbe 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -35,6 +35,7 @@
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
+#include <rdma/restrack.h>
#include "rdma_core.h"
#include "uverbs.h"
@@ -319,6 +320,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
obj->uobject.object = cq;
obj->uobject.user_handle = user_handle;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
if (ret)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e36d27ed4daa..16ebc6372c31 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -124,16 +124,24 @@ EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
switch (rate) {
- case IB_RATE_2_5_GBPS: return 1;
- case IB_RATE_5_GBPS: return 2;
- case IB_RATE_10_GBPS: return 4;
- case IB_RATE_20_GBPS: return 8;
- case IB_RATE_30_GBPS: return 12;
- case IB_RATE_40_GBPS: return 16;
- case IB_RATE_60_GBPS: return 24;
- case IB_RATE_80_GBPS: return 32;
- case IB_RATE_120_GBPS: return 48;
- default: return -1;
+ case IB_RATE_2_5_GBPS: return 1;
+ case IB_RATE_5_GBPS: return 2;
+ case IB_RATE_10_GBPS: return 4;
+ case IB_RATE_20_GBPS: return 8;
+ case IB_RATE_30_GBPS: return 12;
+ case IB_RATE_40_GBPS: return 16;
+ case IB_RATE_60_GBPS: return 24;
+ case IB_RATE_80_GBPS: return 32;
+ case IB_RATE_120_GBPS: return 48;
+ case IB_RATE_14_GBPS: return 6;
+ case IB_RATE_56_GBPS: return 22;
+ case IB_RATE_112_GBPS: return 45;
+ case IB_RATE_168_GBPS: return 67;
+ case IB_RATE_25_GBPS: return 10;
+ case IB_RATE_100_GBPS: return 40;
+ case IB_RATE_200_GBPS: return 80;
+ case IB_RATE_300_GBPS: return 120;
+ default: return -1;
}
}
EXPORT_SYMBOL(ib_rate_to_mult);
@@ -141,16 +149,24 @@ EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
switch (mult) {
- case 1: return IB_RATE_2_5_GBPS;
- case 2: return IB_RATE_5_GBPS;
- case 4: return IB_RATE_10_GBPS;
- case 8: return IB_RATE_20_GBPS;
- case 12: return IB_RATE_30_GBPS;
- case 16: return IB_RATE_40_GBPS;
- case 24: return IB_RATE_60_GBPS;
- case 32: return IB_RATE_80_GBPS;
- case 48: return IB_RATE_120_GBPS;
- default: return IB_RATE_PORT_CURRENT;
+ case 1: return IB_RATE_2_5_GBPS;
+ case 2: return IB_RATE_5_GBPS;
+ case 4: return IB_RATE_10_GBPS;
+ case 8: return IB_RATE_20_GBPS;
+ case 12: return IB_RATE_30_GBPS;
+ case 16: return IB_RATE_40_GBPS;
+ case 24: return IB_RATE_60_GBPS;
+ case 32: return IB_RATE_80_GBPS;
+ case 48: return IB_RATE_120_GBPS;
+ case 6: return IB_RATE_14_GBPS;
+ case 22: return IB_RATE_56_GBPS;
+ case 45: return IB_RATE_112_GBPS;
+ case 67: return IB_RATE_168_GBPS;
+ case 10: return IB_RATE_25_GBPS;
+ case 40: return IB_RATE_100_GBPS;
+ case 80: return IB_RATE_200_GBPS;
+ case 120: return IB_RATE_300_GBPS;
+ default: return IB_RATE_PORT_CURRENT;
}
}
EXPORT_SYMBOL(mult_to_ib_rate);
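
Both lookup tables encode the same relationship: the multiplier is the link rate divided by the 2.5 Gb/s base rate, so IB_RATE_25_GBPS maps to 25 / 2.5 = 10 and IB_RATE_100_GBPS to 100 / 2.5 = 40. Entries whose nominal rates are not exact multiples are rounded to a nearby integer, e.g. IB_RATE_14_GBPS (nominally 14.0625 Gb/s, 14.0625 / 2.5 = 5.625) is stored as 6 and IB_RATE_56_GBPS (56.25 / 2.5 = 22.5) as 22, which is presumably why mult_to_ib_rate() stays an explicit inverse table rather than a computed division.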
@@ -247,6 +263,10 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
}
+ pd->res.type = RDMA_RESTRACK_PD;
+ pd->res.kern_name = caller;
+ rdma_restrack_add(&pd->res);
+
if (mr_access_flags) {
struct ib_mr *mr;
@@ -296,6 +316,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
requires the caller to guarantee we can't race here. */
WARN_ON(atomic_read(&pd->usecnt));
+ rdma_restrack_del(&pd->res);
/* Making delalloc_pd a void return is a WIP, no driver should return
an error here. */
ret = pd->device->dealloc_pd(pd);
@@ -421,8 +442,7 @@ static bool find_gid_index(const union ib_gid *gid,
const struct ib_gid_attr *gid_attr,
void *context)
{
- struct find_gid_index_context *ctx =
- (struct find_gid_index_context *)context;
+ struct find_gid_index_context *ctx = context;
if (ctx->gid_type != gid_attr->gid_type)
return false;
@@ -481,8 +501,53 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+/* Resolve the destination MAC address and hop limit for a unicast
+ * destination GID entry, taking the source GID entry into account.
+ * ah_attr must have a valid port_num and sgid_index.
+ */
+static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ struct ib_gid_attr sgid_attr;
+ struct ib_global_route *grh;
+ int hop_limit = 0xff;
+ union ib_gid sgid;
+ int ret;
+
+ grh = rdma_ah_retrieve_grh(ah_attr);
+
+ ret = ib_query_gid(device,
+ rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index,
+ &sgid, &sgid_attr);
+ if (ret || !sgid_attr.ndev) {
+ if (!ret)
+ ret = -ENXIO;
+ return ret;
+ }
+
+ /* If destination is link local and source GID is RoCEv1,
+ * IP stack is not used.
+ */
+ if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
+ sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
+ rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
+ ah_attr->roce.dmac);
+ goto done;
+ }
+
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
+ ah_attr->roce.dmac,
+ sgid_attr.ndev, &hop_limit);
+done:
+ dev_put(sgid_attr.ndev);
+
+ grh->hop_limit = hop_limit;
+ return ret;
+}
+
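
The link-local shortcut in ib_resolve_unicast_gid_dmac() works because a link-local GID built from a MAC address embeds that MAC in modified EUI-64 form, so rdma_get_ll_mac() can recover it without any IP-stack resolution: for MAC 00:11:22:33:44:55 the GID is fe80::0211:22ff:fe33:4455 (universal/local bit of the first octet flipped, ff:fe inserted between the two MAC halves).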
/*
- * This function creates ah from the incoming packet.
+ * This function initializes address handle attributes from the incoming packet.
* Incoming packet has dgid of the receiver node on which this code is
* getting executed and, sgid contains the GID of the sender.
*
@@ -490,13 +555,10 @@ EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
* as sgid and, sgid is used as dgid because sgid contains destinations
* GID whom to respond to.
*
- * This is why when calling rdma_addr_find_l2_eth_by_grh() function, the
- * position of arguments dgid and sgid do not match the order of the
- * parameters.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr)
{
u32 flow_class;
u16 gid_index;
@@ -523,57 +585,33 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
if (ret)
return ret;
+ rdma_ah_set_sl(ah_attr, wc->sl);
+ rdma_ah_set_port_num(ah_attr, port_num);
+
if (rdma_protocol_roce(device, port_num)) {
- int if_index = 0;
u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
wc->vlan_id : 0xffff;
- struct net_device *idev;
- struct net_device *resolved_dev;
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
- if (!device->get_netdev)
- return -EOPNOTSUPP;
-
- idev = device->get_netdev(device, port_num);
- if (!idev)
- return -ENODEV;
-
- ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
- ah_attr->roce.dmac,
- wc->wc_flags & IB_WC_WITH_VLAN ?
- NULL : &vlan_id,
- &if_index, &hoplimit);
- if (ret) {
- dev_put(idev);
- return ret;
- }
-
- resolved_dev = dev_get_by_index(&init_net, if_index);
- rcu_read_lock();
- if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
- resolved_dev))
- ret = -EHOSTUNREACH;
- rcu_read_unlock();
- dev_put(idev);
- dev_put(resolved_dev);
+ ret = get_sgid_index_from_eth(device, port_num,
+ vlan_id, &dgid,
+ gid_type, &gid_index);
if (ret)
return ret;
- ret = get_sgid_index_from_eth(device, port_num, vlan_id,
- &dgid, gid_type, &gid_index);
- if (ret)
- return ret;
- }
-
- rdma_ah_set_dlid(ah_attr, wc->slid);
- rdma_ah_set_sl(ah_attr, wc->sl);
- rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- rdma_ah_set_port_num(ah_attr, port_num);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ return ib_resolve_unicast_gid_dmac(device, ah_attr);
+ } else {
+ rdma_ah_set_dlid(ah_attr, wc->slid);
+ rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- if (wc->wc_flags & IB_WC_GRH) {
- if (!rdma_cap_eth_ah(device, port_num)) {
+ if (wc->wc_flags & IB_WC_GRH) {
if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
ret = ib_find_cached_gid_by_port(device, &dgid,
IB_GID_TYPE_IB,
@@ -584,18 +622,17 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
} else {
gid_index = 0;
}
- }
-
- flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
- flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ }
+ return 0;
}
- return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_wc);
+EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
const struct ib_grh *grh, u8 port_num)
@@ -603,7 +640,7 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
struct rdma_ah_attr ah_attr;
int ret;
- ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
+ ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
if (ret)
return ERR_PTR(ret);
@@ -850,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = device->create_qp(pd, qp_init_attr, NULL);
+ qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
if (IS_ERR(qp))
return qp;
@@ -860,7 +897,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return ERR_PTR(ret);
}
- qp->device = device;
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
@@ -890,7 +926,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
atomic_inc(&qp_init_attr->srq->usecnt);
}
- qp->pd = pd;
qp->send_cq = qp_init_attr->send_cq;
qp->xrcd = NULL;
@@ -1269,16 +1304,8 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
return -EINVAL;
- if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
- return 0;
-
grh = rdma_ah_retrieve_grh(ah_attr);
- if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
- rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
- ah_attr->roce.dmac);
- return 0;
- }
if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
__be32 addr = 0;
@@ -1290,40 +1317,52 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
(char *)ah_attr->roce.dmac);
}
} else {
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
- int ifindex;
- int hop_limit;
-
- ret = ib_query_gid(device,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index,
- &sgid, &sgid_attr);
-
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- goto out;
- }
-
- ifindex = sgid_attr.ndev->ifindex;
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ }
+ return ret;
+}
- ret =
- rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- ah_attr->roce.dmac,
- NULL, &ifindex, &hop_limit);
+/*
+ * IB core internal function to perform QP attribute modification.
+ */
+static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ int ret;
- dev_put(sgid_attr.ndev);
+ if (rdma_ib_or_roce(qp->device, port)) {
+ if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
+ pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->rq_psn &= 0xffffff;
+ }
- grh->hop_limit = hop_limit;
+ if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
+ pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->sq_psn &= 0xffffff;
+ }
}
-out:
+
+ ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+ if (!ret && (attr_mask & IB_QP_PORT))
+ qp->port = attr->port_num;
+
return ret;
}
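
The PSN clamping above reflects that packet sequence numbers are 24-bit quantities on IB and RoCE, so _ib_modify_qp() masks oversized values rather than handing them to the driver: a caller-supplied sq_psn of 0x1234567, for example, triggers the one-time warning and is truncated to 0x1234567 & 0xffffff = 0x234567.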
+static bool is_qp_type_connected(const struct ib_qp *qp)
+{
+ return (qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_RC ||
+ qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT);
+}
+
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
- * @qp: The QP to modify.
+ * @ib_qp: The QP to modify.
* @attr: On input, specifies the QP attributes to modify. On output,
* the current values of selected QP attributes are returned.
* @attr_mask: A bit-mask used to specify which attributes of the QP
@@ -1332,21 +1371,20 @@ out:
* are being modified.
* It returns 0 on success and returns appropriate error code on error.
*/
-int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+ struct ib_qp *qp = ib_qp->real_qp;
int ret;
- if (attr_mask & IB_QP_AV) {
+ if (attr_mask & IB_QP_AV &&
+ attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
if (ret)
return ret;
}
- ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
- if (!ret && (attr_mask & IB_QP_PORT))
- qp->port = attr->port_num;
-
- return ret;
+ return _ib_modify_qp(qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
@@ -1409,7 +1447,7 @@ int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
+ return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
@@ -1503,6 +1541,7 @@ int ib_destroy_qp(struct ib_qp *qp)
if (!qp->uobject)
rdma_rw_cleanup_mrs(qp);
+ rdma_restrack_del(&qp->res);
ret = qp->device->destroy_qp(qp);
if (!ret) {
if (pd)
@@ -1545,6 +1584,8 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
cq->event_handler = event_handler;
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
}
return cq;
@@ -1563,6 +1604,7 @@ int ib_destroy_cq(struct ib_cq *cq)
if (atomic_read(&cq->usecnt))
return -EBUSY;
+ rdma_restrack_del(&cq->res);
return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
@@ -1747,7 +1789,7 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
}
EXPORT_SYMBOL(ib_detach_mcast);
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
struct ib_xrcd *xrcd;
@@ -1765,7 +1807,7 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
return xrcd;
}
-EXPORT_SYMBOL(ib_alloc_xrcd);
+EXPORT_SYMBOL(__ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
@@ -1790,11 +1832,11 @@ EXPORT_SYMBOL(ib_dealloc_xrcd);
* ib_create_wq - Creates a WQ associated with the specified protection
* domain.
* @pd: The protection domain associated with the WQ.
- * @wq_init_attr: A list of initial attributes required to create the
+ * @wq_attr: A list of initial attributes required to create the
* WQ. If WQ creation succeeds, then the attributes are updated to
* the actual capabilities of the created WQ.
*
- * wq_init_attr->max_wr and wq_init_attr->max_sge determine
+ * wq_attr->max_wr and wq_attr->max_sge determine
* the requested size of the WQ, and set to the actual values allocated
* on return.
* If ib_create_wq() succeeds, then max_wr and max_sge will always be
@@ -2156,16 +2198,16 @@ static void __ib_drain_sq(struct ib_qp *qp)
struct ib_send_wr swr = {}, *bad_swr;
int ret;
- swr.wr_cqe = &sdrain.cqe;
- sdrain.cqe.done = ib_drain_qp_done;
- init_completion(&sdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
}
+ swr.wr_cqe = &sdrain.cqe;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
ret = ib_post_send(qp, &swr, &bad_swr);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
@@ -2190,16 +2232,16 @@ static void __ib_drain_rq(struct ib_qp *qp)
struct ib_recv_wr rwr = {}, *bad_rwr;
int ret;
- rwr.wr_cqe = &rdrain.cqe;
- rdrain.cqe.done = ib_drain_qp_done;
- init_completion(&rdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
}
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
ret = ib_post_recv(qp, &rwr, &bad_rwr);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ecbac91b2e14..ca32057e886f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -43,20 +43,41 @@
#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
-
-#define BNXT_RE_PAGE_SIZE_4K BIT(12)
-#define BNXT_RE_PAGE_SIZE_8K BIT(13)
-#define BNXT_RE_PAGE_SIZE_64K BIT(16)
-#define BNXT_RE_PAGE_SIZE_2M BIT(21)
-#define BNXT_RE_PAGE_SIZE_8M BIT(23)
-#define BNXT_RE_PAGE_SIZE_1G BIT(30)
-
-#define BNXT_RE_MAX_MR_SIZE BIT(30)
+#define BNXT_RE_PAGE_SHIFT_4K (12)
+#define BNXT_RE_PAGE_SHIFT_8K (13)
+#define BNXT_RE_PAGE_SHIFT_64K (16)
+#define BNXT_RE_PAGE_SHIFT_2M (21)
+#define BNXT_RE_PAGE_SHIFT_8M (23)
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+
+#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
+#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
+#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
+#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
+#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
+#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
+
+#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39)
+#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
+/* Number of MRs to reserve for PF, leaving remainder for VFs */
+#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
+#define BNXT_RE_MAX_GID_PER_VF 128
+
+/*
+ * Percentage of resources of each type reserved for the PF.
+ * The remaining resources are divided equally among the VFs.
+ * Valid range: [0, 100].
+ */
+#define BNXT_RE_PCT_RSVD_FOR_PF 50
#define BNXT_RE_UD_QP_HW_STALL 0x400000
@@ -100,6 +121,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_TASK_IN_PROG 6
+#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
@@ -145,6 +167,9 @@ struct bnxt_re_dev {
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
atomic_t nq_alloc_cnt;
+ u32 is_virtfn;
+ u32 num_vfs;
+ struct bnxt_qplib_roce_stats stats;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 7b28219eba46..77416bc61e6e 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -58,16 +58,55 @@
#include "hw_counters.h"
static const char * const bnxt_re_stat_name[] = {
- [BNXT_RE_ACTIVE_QP] = "active_qps",
- [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
- [BNXT_RE_ACTIVE_CQ] = "active_cqs",
- [BNXT_RE_ACTIVE_MR] = "active_mrs",
- [BNXT_RE_ACTIVE_MW] = "active_mws",
- [BNXT_RE_RX_PKTS] = "rx_pkts",
- [BNXT_RE_RX_BYTES] = "rx_bytes",
- [BNXT_RE_TX_PKTS] = "tx_pkts",
- [BNXT_RE_TX_BYTES] = "tx_bytes",
- [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors"
+ [BNXT_RE_ACTIVE_QP] = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ] = "active_cqs",
+ [BNXT_RE_ACTIVE_MR] = "active_mrs",
+ [BNXT_RE_ACTIVE_MW] = "active_mws",
+ [BNXT_RE_RX_PKTS] = "rx_pkts",
+ [BNXT_RE_RX_BYTES] = "rx_bytes",
+ [BNXT_RE_TX_PKTS] = "tx_pkts",
+ [BNXT_RE_TX_BYTES] = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
+ [BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
+ [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
+ [BNXT_RE_MISSING_RESP] = "missing_resp",
+ [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
+ [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
+ [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",
+ [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err",
+ [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err",
+ [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err",
+ [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err",
+ [BNXT_RE_DUP_REQ] = "dup_req",
+ [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max",
+ [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch",
+ [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe",
+ [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err",
+ [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey",
+ [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err",
+ [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm",
+ [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err",
+ [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey",
+ [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err",
+ [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm",
+ [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err",
+ [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow",
+ [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode",
+ [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic",
+ [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err",
+ [BNXT_RE_RES_MEM_ERROR] = "res_mem_err",
+ [BNXT_RE_RES_SRQ_ERR] = "res_srq_err",
+ [BNXT_RE_RES_CMP_ERR] = "res_cmp_err",
+ [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey",
+ [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err",
+ [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
+ [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
+ [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
+ [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err"
};
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
@@ -76,6 +115,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+ int rc = 0;
if (!port || !stats)
return -EINVAL;
@@ -97,6 +137,91 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_TX_BYTES] =
le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
}
+ if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
+ rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats);
+ if (rc)
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ stats->value[BNXT_RE_TO_RETRANSMITS] =
+ rdev->stats.to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
+ rdev->stats.seq_err_naks_rcvd;
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
+ rdev->stats.max_retry_exceeded;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] =
+ rdev->stats.rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] =
+ rdev->stats.missing_resp;
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
+ rdev->stats.unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] =
+ rdev->stats.bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
+ rdev->stats.local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
+ rdev->stats.local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
+ rdev->stats.mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
+ rdev->stats.remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
+ rdev->stats.remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] =
+ rdev->stats.remote_op_err;
+ stats->value[BNXT_RE_DUP_REQ] =
+ rdev->stats.dup_req;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] =
+ rdev->stats.res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
+ rdev->stats.res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
+ rdev->stats.res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] =
+ rdev->stats.res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
+ rdev->stats.res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
+ rdev->stats.res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] =
+ rdev->stats.res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
+ rdev->stats.res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
+ rdev->stats.res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
+ rdev->stats.res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] =
+ rdev->stats.res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
+ rdev->stats.res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
+ rdev->stats.res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
+ rdev->stats.res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
+ rdev->stats.res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] =
+ rdev->stats.res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR] =
+ rdev->stats.res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] =
+ rdev->stats.res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] =
+ rdev->stats.res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
+ rdev->stats.res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
+ rdev->stats.res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
+ rdev->stats.res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
+ rdev->stats.res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] =
+ rdev->stats.res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] =
+ rdev->stats.res_rx_pci_err;
+ }
+
return ARRAY_SIZE(bnxt_re_stat_name);
}
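A provider returning names from a table like bnxt_re_stat_name normally pairs this with an alloc_hw_stats hook. A minimal sketch of that usual pattern (not part of this hunk; assumes per-port stats only):

	static struct rdma_hw_stats *bnxt_re_alloc_hw_stats(struct ib_device *ibdev,
							    u8 port_num)
	{
		/* per-port statistics only */
		if (!port_num)
			return NULL;
		return rdma_alloc_hw_stats_struct(bnxt_re_stat_name,
						  ARRAY_SIZE(bnxt_re_stat_name),
						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
	}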
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index be0dc0093b58..a01a922717d5 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -51,6 +51,45 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_TO_RETRANSMITS,
+ BNXT_RE_SEQ_ERR_NAKS_RCVD,
+ BNXT_RE_MAX_RETRY_EXCEEDED,
+ BNXT_RE_RNR_NAKS_RCVD,
+ BNXT_RE_MISSING_RESP,
+ BNXT_RE_UNRECOVERABLE_ERR,
+ BNXT_RE_BAD_RESP_ERR,
+ BNXT_RE_LOCAL_QP_OP_ERR,
+ BNXT_RE_LOCAL_PROTECTION_ERR,
+ BNXT_RE_MEM_MGMT_OP_ERR,
+ BNXT_RE_REMOTE_INVALID_REQ_ERR,
+ BNXT_RE_REMOTE_ACCESS_ERR,
+ BNXT_RE_REMOTE_OP_ERR,
+ BNXT_RE_DUP_REQ,
+ BNXT_RE_RES_EXCEED_MAX,
+ BNXT_RE_RES_LENGTH_MISMATCH,
+ BNXT_RE_RES_EXCEEDS_WQE,
+ BNXT_RE_RES_OPCODE_ERR,
+ BNXT_RE_RES_RX_INVALID_RKEY,
+ BNXT_RE_RES_RX_DOMAIN_ERR,
+ BNXT_RE_RES_RX_NO_PERM,
+ BNXT_RE_RES_RX_RANGE_ERR,
+ BNXT_RE_RES_TX_INVALID_RKEY,
+ BNXT_RE_RES_TX_DOMAIN_ERR,
+ BNXT_RE_RES_TX_NO_PERM,
+ BNXT_RE_RES_TX_RANGE_ERR,
+ BNXT_RE_RES_IRRQ_OFLOW,
+ BNXT_RE_RES_UNSUP_OPCODE,
+ BNXT_RE_RES_UNALIGNED_ATOMIC,
+ BNXT_RE_RES_REM_INV_ERR,
+ BNXT_RE_RES_MEM_ERROR,
+ BNXT_RE_RES_SRQ_ERR,
+ BNXT_RE_RES_CMP_ERR,
+ BNXT_RE_RES_INVALID_DUP_RKEY,
+ BNXT_RE_RES_WQE_FORMAT_ERR,
+ BNXT_RE_RES_CQ_LOAD_ERR,
+ BNXT_RE_RES_SRQ_LOAD_ERR,
+ BNXT_RE_RES_TX_PCI_ERR,
+ BNXT_RE_RES_RX_PCI_ERR,
BNXT_RE_NUM_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2032db7db766..9b8fa77b8831 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -141,12 +141,13 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
-
- ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
+ memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
+ min(sizeof(dev_attr->fw_ver),
+ sizeof(ib_attr->fw_ver)));
bnxt_qplib_get_guid(rdev->netdev->dev_addr,
(u8 *)&ib_attr->sys_image_guid);
ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -247,8 +248,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
IB_PORT_VENDOR_CLASS_SUP |
IB_PORT_IP_BASED_GIDS;
- /* Max MSG size set to 2G for now */
- port_attr->max_msg_sz = 0x80000000;
+ port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
port_attr->bad_pkey_cntr = 0;
port_attr->qkey_viol_cntr = 0;
port_attr->pkey_tbl_len = dev_attr->max_pkey;
@@ -281,6 +281,15 @@ int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+ rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
+ rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+}
+
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey)
{
@@ -532,7 +541,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
pbl_tbl = dma_addr;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
- BNXT_RE_FENCE_PBL_SIZE, false);
+ BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
goto fail;
@@ -1018,6 +1027,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_qp *qp;
struct bnxt_re_cq *cq;
+ struct bnxt_re_srq *srq;
int rc, entries;
if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
@@ -1073,9 +1083,15 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
}
if (qp_init_attr->srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not supported");
- rc = -ENOTSUPP;
- goto fail;
+ srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
+ ib_srq);
+ if (!srq) {
+ dev_err(rdev_to_dev(rdev), "SRQ not found");
+ rc = -EINVAL;
+ goto fail;
+ }
+ qp->qplib_qp.srq = &srq->qplib_srq;
+ qp->qplib_qp.rq.max_wqe = 0;
} else {
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty
@@ -1280,6 +1296,237 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
}
+/* Shared Receive Queues */
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc;
+
+ if (qplib_srq->cq)
+ nq = qplib_srq->cq->nq;
+ rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
+ return rc;
+ }
+
+ if (srq->umem && !IS_ERR(srq->umem))
+ ib_umem_release(srq->umem);
+ kfree(srq);
+ atomic_dec(&rdev->srq_count);
+ if (nq)
+ nq->budget--;
+ return 0;
+}
+
+static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
+ struct bnxt_re_pd *pd,
+ struct bnxt_re_srq *srq,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq_req ureq;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct ib_umem *umem;
+ int bytes = 0;
+ struct ib_ucontext *context = pd->ib_pd.uobject->context;
+ struct bnxt_re_ucontext *cntx = container_of(context,
+ struct bnxt_re_ucontext,
+ ib_uctx);
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+ return -EFAULT;
+
+ bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get(context, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem))
+ return PTR_ERR(umem);
+
+ srq->umem = umem;
+ qplib_srq->nmap = umem->nmap;
+ qplib_srq->sglist = umem->sg_head.sgl;
+ qplib_srq->srq_handle = ureq.srq_handle;
+ qplib_srq->dpi = &cntx->dpi;
+
+ return 0;
+}
+
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_srq *srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc, entries;
+
+ if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+ dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ srq->rdev = rdev;
+ srq->qplib_srq.pd = &pd->qplib_pd;
+ srq->qplib_srq.dpi = &rdev->dpi_privileged;
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty
+ */
+ entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
+ if (entries > dev_attr->max_srq_wqes + 1)
+ entries = dev_attr->max_srq_wqes + 1;
+
+ srq->qplib_srq.max_wqe = entries;
+ srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
+ srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ srq->srq_limit = srq_init_attr->attr.srq_limit;
+ srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
+ nq = &rdev->nq[0];
+
+ if (udata) {
+ rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+ if (rc)
+ goto fail;
+ }
+
+ rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
+ goto fail;
+ }
+
+ if (udata) {
+ struct bnxt_re_srq_resp resp;
+
+ resp.srqid = srq->qplib_srq.id;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
+ bnxt_qplib_destroy_srq(&rdev->qplib_res,
+ &srq->qplib_srq);
+ goto exit;
+ }
+ }
+ if (nq)
+ nq->budget++;
+ atomic_inc(&rdev->srq_count);
+
+ return &srq->ib_srq;
+
+fail:
+ if (udata && srq->umem && !IS_ERR(srq->umem)) {
+ ib_umem_release(srq->umem);
+ srq->umem = NULL;
+ }
+
+ kfree(srq);
+exit:
+ return ERR_PTR(rc);
+}
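From the consumer side, this verb is reached through the standard SRQ API; a hedged sketch of a ULP creating a basic SRQ (queue depth is an assumption):

	struct ib_srq_init_attr init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr    = 1024,	/* assumed depth */
			.max_sge   = 1,
			.srq_limit = 0,		/* watermark armed later via ib_modify_srq() */
		},
	};
	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
	if (IS_ERR(srq))
		return PTR_ERR(srq);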
+
+int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+ break;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+ rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
+ return rc;
+ }
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to build and send a response back through udata */
+ break;
+ default:
+ dev_err(rdev_to_dev(rdev),
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_srq tsrq;
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ /* Get live SRQ attr */
+ tsrq.qplib_srq.id = srq->qplib_srq.id;
+ rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
+ return rc;
+ }
+ srq_attr->max_wr = srq->qplib_srq.max_wqe;
+ srq_attr->max_sge = srq->qplib_srq.max_sge;
+ srq_attr->srq_limit = tsrq.qplib_srq.threshold;
+
+ return 0;
+}
+
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_qplib_swqe wqe;
+ unsigned long flags;
+ int rc = 0, payload_sz = 0;
+
+ spin_lock_irqsave(&srq->lock, flags);
+ while (wr) {
+ /* Transcribe each ib_recv_wr to qplib_swqe */
+ wqe.num_sge = wr->num_sge;
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+ wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return rc;
+}
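And the matching replenish path, as a sketch (buffer names and sizes are assumptions):

	struct ib_sge sge = {
		.addr   = dma_addr,		/* assumed DMA-mapped receive buffer */
		.length = buf_len,
		.lkey   = pd->local_dma_lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = (unsigned long)buf,
		.sg_list = &sge,
		.num_sge = 1,
	}, *bad_wr;

	if (ib_post_srq_recv(srq, &wr, &bad_wr))
		pr_warn("SRQ replenish failed\n");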
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
@@ -2295,10 +2542,14 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
- struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_dev *rdev = cq->rdev;
int rc;
- struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
+ struct bnxt_re_cq *cq;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_re_dev *rdev;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+ nq = cq->qplib_cq.nq;
rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2308,12 +2559,11 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
if (!IS_ERR_OR_NULL(cq->umem))
ib_umem_release(cq->umem);
- if (cq) {
- kfree(cq->cql);
- kfree(cq);
- }
atomic_dec(&rdev->cq_count);
nq->budget--;
+ kfree(cq->cql);
+ kfree(cq);
+
return 0;
}
@@ -3078,7 +3328,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->qplib_mr.hwq.level = PBL_LVL_MAX;
mr->qplib_mr.total_size = -1; /* Infinite length */
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
+ PAGE_SIZE);
if (rc)
goto fail_mr;
@@ -3104,10 +3355,8 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
+ if (rc)
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
- return rc;
- }
if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
@@ -3170,7 +3419,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc)
- goto fail;
+ goto bail;
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->ib_mr.lkey;
@@ -3192,9 +3441,10 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
return &mr->ib_mr;
fail_mr:
- bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-fail:
kfree(mr->pages);
+fail:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+bail:
kfree(mr);
return ERR_PTR(rc);
}
@@ -3248,6 +3498,46 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
+static int bnxt_re_page_size_ok(int page_shift)
+{
+ switch (page_shift) {
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
+ int page_shift)
+{
+ u64 *pbl_tbl = pbl_tbl_orig;
+ u64 paddr;
+ u64 page_mask = (1ULL << page_shift) - 1;
+ int i, pages;
+ struct scatterlist *sg;
+ int entry;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (i = 0; i < pages; i++) {
+ paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
+ if (pbl_tbl == pbl_tbl_orig)
+ *pbl_tbl++ = paddr & ~page_mask;
+ else if ((paddr & page_mask) == 0)
+ *pbl_tbl++ = paddr;
+ }
+ }
+ return pbl_tbl - pbl_tbl_orig;
+}
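To make the alignment rule concrete, a worked example with assumed addresses: with page_shift = 21 (2M pages) and 4K DMA chunks starting at 0x80000000, the first PBL entry is 0x80000000 & ~0x1FFFFF = 0x80000000; the chunks at 0x80001000 through 0x801FF000 are not 2M-aligned and are skipped, and the next entry is written only at 0x80200000. The returned count is therefore the number of page_shift-sized pages, not the number of 4K chunks.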
+
/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
@@ -3257,10 +3547,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
struct ib_umem *umem;
- u64 *pbl_tbl, *pbl_tbl_orig;
- int i, umem_pgs, pages, rc;
- struct scatterlist *sg;
- int entry;
+ u64 *pbl_tbl = NULL;
+ int umem_pgs, page_shift, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
@@ -3277,64 +3565,68 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
umem = ib_umem_get(ib_pd->uobject->context, start, length,
mr_access_flags, 0);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
- goto free_mr;
+ goto free_mrw;
}
mr->ib_umem = umem;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
- goto release_umem;
- }
- /* The fixed portion of the rkey is the same as the lkey */
- mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
mr->qplib_mr.va = virt_addr;
umem_pgs = ib_umem_page_count(umem);
if (!umem_pgs) {
dev_err(rdev_to_dev(rdev), "umem is invalid!");
rc = -EINVAL;
- goto free_mrw;
+ goto free_umem;
}
mr->qplib_mr.total_size = length;
pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
if (!pbl_tbl) {
- rc = -EINVAL;
- goto free_mrw;
+ rc = -ENOMEM;
+ goto free_umem;
}
- pbl_tbl_orig = pbl_tbl;
- if (umem->hugetlb) {
- dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
+ page_shift = umem->page_shift;
+
+ if (!bnxt_re_page_size_ok(page_shift)) {
+ dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
rc = -EFAULT;
goto fail;
}
- if (umem->page_shift != PAGE_SHIFT) {
- dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
- rc = -EFAULT;
+ if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+ dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
+ length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
+ rc = -EINVAL;
goto fail;
}
- /* Map umem buf ptrs to the PBL */
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> umem->page_shift;
- for (i = 0; i < pages; i++, pbl_tbl++)
- *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
+ if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
+ page_shift = BNXT_RE_PAGE_SHIFT_2M;
+ dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
+ 1 << page_shift);
}
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
- umem_pgs, false);
+
+ /* Map umem buf ptrs to the PBL */
+ umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
+ umem_pgs, false, 1 << page_shift);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register user MR");
goto fail;
}
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
@@ -3342,11 +3634,11 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return &mr->ib_mr;
fail:
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
+free_umem:
+ ib_umem_release(umem);
free_mrw:
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-release_umem:
- ib_umem_release(umem);
free_mr:
kfree(mr);
return ERR_PTR(rc);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 1df11ed272ea..423ebe012f95 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -68,6 +68,15 @@ struct bnxt_re_ah {
struct bnxt_qplib_ah qplib_ah;
};
+struct bnxt_re_srq {
+ struct bnxt_re_dev *rdev;
+ u32 srq_limit;
+ struct ib_srq ib_srq;
+ struct bnxt_qplib_srq qplib_srq;
+ struct ib_umem *umem;
+ spinlock_t lock; /* protect srq */
+};
+
struct bnxt_re_qp {
struct list_head list;
struct bnxt_re_dev *rdev;
@@ -143,6 +152,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
@@ -164,6 +174,16 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int bnxt_re_destroy_srq(struct ib_srq *srq);
+int bnxt_re_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index aafc19aa5de1..508d00a5a106 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,79 @@ static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+/* SR-IOV helper functions */
+
+static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
+{
+ struct bnxt *bp;
+
+ bp = netdev_priv(rdev->en_dev->net);
+ if (BNXT_VF(bp))
+ rdev->is_virtfn = 1;
+}
+
+/* Set the maximum number of each resource that the driver actually wants
+ * to allocate. This may be up to the maximum number the firmware has
+ * reserved for the function. The driver may choose to allocate fewer
+ * resources than the firmware maximum.
+ */
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
+ u32 i;
+ u32 vf_pct;
+ u32 num_vfs;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+
+ rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ dev_attr->max_qp);
+
+ rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ /* Use max_mr from fw since max_mrw does not get set */
+ rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
+ dev_attr->max_mr);
+ rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ dev_attr->max_srq);
+ rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
+ dev_attr->max_cq);
+
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+
+ if (rdev->num_vfs) {
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ num_vfs = 100 * rdev->num_vfs;
+ vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
+ vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
+ vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF
+ * and divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, iSER, NVMe, etc. If the firmware
+ * severely restricts the number of MRs, then let the PF have
+ * half and divide the rest among VFs, as for the other
+ * resource types.
+ */
+ if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
+ vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
+ else
+ vf_mrws = (rdev->qplib_ctx.mrw_count -
+ BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
+ vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ }
+ rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
+ rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
+ rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
+ rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
+ rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+}
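A worked pass through the arithmetic above, with assumed values: qpc_count = 65536, BNXT_RE_PCT_RSVD_FOR_PF = 50 and num_vfs = 8 give vf_pct = 50 and vf_qps = (65536 * 50) / (100 * 8) = 4096 QPs per VF, so half the pool stays with the PF and the rest is split evenly. For MRs with the 256K pool, the fixed-reservation branch applies instead: (262144 - 32768) / 8 = 28672 MRs per VF.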
+
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
@@ -91,6 +164,15 @@ static void bnxt_re_start(void *p)
static void bnxt_re_sriov_config(void *p, int num_vfs)
{
+ struct bnxt_re_dev *rdev = p;
+
+ if (!rdev)
+ return;
+
+ rdev->num_vfs = num_vfs;
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
}
static void bnxt_re_shutdown(void *p)
@@ -417,7 +499,7 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return ERR_PTR(-EINVAL);
if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
- dev_dbg(&pdev->dev,
+ dev_info(&pdev->dev,
"%s: probe error: RoCE is not supported on this device",
ROCE_DRV_MODULE_NAME);
return ERR_PTR(-ENODEV);
@@ -490,6 +572,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_port = bnxt_re_query_port;
ibdev->get_port_immutable = bnxt_re_get_port_immutable;
+ ibdev->get_dev_fw_str = bnxt_re_query_fw_str;
ibdev->query_pkey = bnxt_re_query_pkey;
ibdev->query_gid = bnxt_re_query_gid;
ibdev->get_netdev = bnxt_re_get_netdev;
@@ -505,6 +588,12 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_ah = bnxt_re_query_ah;
ibdev->destroy_ah = bnxt_re_destroy_ah;
+ ibdev->create_srq = bnxt_re_create_srq;
+ ibdev->modify_srq = bnxt_re_modify_srq;
+ ibdev->query_srq = bnxt_re_query_srq;
+ ibdev->destroy_srq = bnxt_re_destroy_srq;
+ ibdev->post_srq_recv = bnxt_re_post_srq_recv;
+
ibdev->create_qp = bnxt_re_create_qp;
ibdev->modify_qp = bnxt_re_modify_qp;
ibdev->query_qp = bnxt_re_query_qp;
@@ -541,14 +630,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->dev_attr.fw_ver);
-}
-
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -558,12 +639,10 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
-static DEVICE_ATTR(fw_rev, 0444, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
static struct device_attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_rev,
&dev_attr_hca_type
};
@@ -616,10 +695,10 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
return rdev;
}
-static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
- struct creq_func_event *aeqe)
+static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
+ *unaffi_async)
{
- switch (aeqe->event) {
+ switch (unaffi_async->event) {
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
@@ -648,6 +727,93 @@ static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
return 0;
}
+static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
+ struct bnxt_re_qp *qp)
+{
+ struct ib_event event;
+
+ memset(&event, 0, sizeof(event));
+ if (qp->qplib_qp.srq) {
+ event.device = &qp->rdev->ibdev;
+ event.element.qp = &qp->ib_qp;
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ }
+
+ if (event.device && qp->ib_qp.event_handler)
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+
+ return 0;
+}
+
+static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
+ void *obj)
+{
+ int rc = 0;
+ u8 event;
+
+ if (!obj)
+ return rc; /* QP was already dead, still return success */
+
+ event = affi_async->event;
+ if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
+ struct bnxt_qplib_qp *lib_qp = obj;
+ struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ rc = bnxt_re_handle_qp_async_event(affi_async, qp);
+ }
+ return rc;
+}
+
+static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
+ void *aeqe, void *obj)
+{
+ struct creq_qp_event *affi_async;
+ struct creq_func_event *unaffi_async;
+ u8 type;
+ int rc;
+
+ type = ((struct creq_base *)aeqe)->type;
+ if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
+ unaffi_async = aeqe;
+ rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
+ } else {
+ affi_async = aeqe;
+ rc = bnxt_re_handle_affi_async_event(affi_async, obj);
+ }
+
+ return rc;
+}
+
+static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *handle, u8 event)
+{
+ struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
+ qplib_srq);
+ struct ib_event ib_event;
+ int rc = 0;
+
+ if (!srq) {
+ dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
+ ROCE_DRV_MODULE_NAME);
+ rc = -EINVAL;
+ goto done;
+ }
+ ib_event.device = &srq->rdev->ibdev;
+ ib_event.element.srq = &srq->ib_srq;
+ if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
+ ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ else
+ ib_event.event = IB_EVENT_SRQ_ERR;
+
+ if (srq->ib_srq.event_handler) {
+ /* Lock event_handler? */
+ (*srq->ib_srq.event_handler)(&ib_event,
+ srq->ib_srq.srq_context);
+ }
+done:
+ return rc;
+}
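A ULP consumes this event through the SRQ event handler it registered at create time; a hedged sketch of the usual re-arm pattern (the threshold value is an assumption):

	static void my_srq_event_handler(struct ib_event *ev, void *ctx)
	{
		if (ev->event == IB_EVENT_SRQ_LIMIT_REACHED) {
			struct ib_srq_attr attr = { .srq_limit = 64 }; /* assumed */

			/* replenish receive buffers here, then re-arm the watermark */
			ib_modify_srq(ev->element.srq, &attr, IB_SRQ_LIMIT);
		}
	}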
+
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *handle)
{
@@ -690,7 +856,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
i - 1, rdev->msix_entries[i].vector,
rdev->msix_entries[i].db_offset,
- &bnxt_re_cqn_handler, NULL);
+ &bnxt_re_cqn_handler,
+ &bnxt_re_srqn_handler);
if (rc) {
dev_err(rdev_to_dev(rdev),
@@ -734,7 +901,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto fail;
@@ -1035,19 +1203,6 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
}
}
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
-{
- u32 i;
-
- rdev->qplib_ctx.qpc_count = BNXT_RE_MAX_QPC_COUNT;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT;
- rdev->qplib_ctx.srqc_count = BNXT_RE_MAX_SRQC_COUNT;
- rdev->qplib_ctx.cq_count = BNXT_RE_MAX_CQ_COUNT;
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-}
-
/* worker thread for polling periodic events. Now used for QoS programming*/
static void bnxt_re_worker(struct work_struct *work)
{
@@ -1070,6 +1225,9 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ /* Check whether VF or PF */
+ bnxt_re_get_sriov_func_type(rdev);
+
rc = bnxt_re_request_msix(rdev);
if (rc) {
pr_err("Failed to get MSI-X vectors: %#x\n", rc);
@@ -1101,16 +1259,18 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
(rdev->en_dev->pdev, &rdev->rcfw,
rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
- 0, &bnxt_re_aeq_handler);
+ rdev->is_virtfn, &bnxt_re_aeq_handler);
if (rc) {
pr_err("Failed to enable RCFW channel: %#x\n", rc);
goto free_ring;
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- bnxt_re_set_resource_limits(rdev);
+ if (!rdev->is_virtfn)
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
if (rc) {
@@ -1125,7 +1285,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto free_ctx;
}
- rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, 0);
+ rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
+ rdev->is_virtfn);
if (rc) {
pr_err("Failed to initialize RCFW: %#x\n", rc);
goto free_sctx;
@@ -1144,13 +1305,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto fail;
}
- rc = bnxt_re_setup_qos(rdev);
- if (rc)
- pr_info("RoCE priority not yet configured\n");
+ if (!rdev->is_virtfn) {
+ rc = bnxt_re_setup_qos(rdev);
+ if (rc)
+ pr_info("RoCE priority not yet configured\n");
- INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
- set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
+ set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
+ schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ }
/* Register ib dev */
rc = bnxt_re_register_ib(rdev);
@@ -1176,6 +1339,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
@@ -1400,7 +1564,7 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_re_dev *rdev, *next;
LIST_HEAD(to_be_deleted);
mutex_lock(&bnxt_re_dev_lock);
@@ -1408,8 +1572,11 @@ static void __exit bnxt_re_mod_exit(void)
if (!list_empty(&bnxt_re_dev_list))
list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
mutex_unlock(&bnxt_re_dev_lock);
-
- list_for_each_entry(rdev, &to_be_deleted, list) {
+ /*
+ * Cleanup the devices in reverse order so that the VF device
+ * cleanup is done before PF cleanup
+ */
+ list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device");
bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 61764f7aa79b..8b5f11ac0e42 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -52,6 +52,7 @@
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
@@ -278,6 +279,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct nq_base *nqe, **nq_ptr;
struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
+ int num_srqne_processed = 0;
u32 sw_cons, raw_cons;
u16 type;
int budget = nq->budget;
@@ -320,6 +322,26 @@ static void bnxt_qplib_service_nq(unsigned long data)
spin_unlock_bh(&cq->compl_lock);
break;
}
+ case NQ_BASE_TYPE_SRQ_EVENT:
+ {
+ struct nq_srq_event *nqsrqe =
+ (struct nq_srq_event *)nqe;
+
+ q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
+ << 32;
+ bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
+ DBR_DBR_TYPE_SRQ_ARMENA);
+ if (!nq->srqn_handler(nq,
+ (struct bnxt_qplib_srq *)q_handle,
+ nqsrqe->event))
+ num_srqne_processed++;
+ else
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: SRQ event 0x%x not handled",
+ nqsrqe->event);
+ break;
+ }
case NQ_BASE_TYPE_DBQ_EVENT:
break;
default:
@@ -384,17 +406,19 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *, u8 event))
+ struct bnxt_qplib_srq *,
+ u8 event))
{
resource_size_t nq_base;
int rc = -1;
nq->pdev = pdev;
nq->vector = msix_vector;
+ if (cqn_handler)
+ nq->cqn_handler = cqn_handler;
- nq->cqn_handler = cqn_handler;
-
- nq->srqn_handler = srqn_handler;
+ if (srqn_handler)
+ nq->srqn_handler = srqn_handler;
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
@@ -410,7 +434,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
if (rc) {
dev_err(&nq->pdev->dev,
"Failed to request IRQ for NQ: %#x", rc);
- bnxt_qplib_disable_nq(nq);
goto fail;
}
@@ -469,6 +492,238 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
return 0;
}
+/* SRQ */
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct dbr_dbr db_msg = { 0 };
+ void __iomem *db;
+ u32 sw_prod = 0;
+
+ /* Ring DB */
+ sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
+ HWQ_CMP(srq_hwq->prod, srq_hwq);
+ db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
+ DBR_DBR_INDEX_MASK);
+ db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
+ DBR_DBR_XID_MASK) | arm_type);
+ db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
+ srq->dbr_base : srq->dpi->dbr;
+ wmb(); /* barrier before db ring */
+ __iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
+}
+
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_destroy_srq req;
+ struct creq_destroy_srq_resp resp;
+ u16 cmd_flags = 0;
+ int rc;
+
+ RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+ return 0;
+}
+
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_create_srq req;
+ struct creq_create_srq_resp resp;
+ struct bnxt_qplib_pbl *pbl;
+ u16 cmd_flags = 0;
+ int rc, idx;
+
+ srq->hwq.max_elements = srq->max_wqe;
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
+ srq->nmap, &srq->hwq.max_elements,
+ BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
+ PAGE_SIZE, HWQ_TYPE_QUEUE);
+ if (rc)
+ goto exit;
+
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq)
+ goto fail;
+
+ RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.dpi = cpu_to_le32(srq->dpi->dpi);
+ req.srq_handle = cpu_to_le64((uintptr_t)srq);
+
+ req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
+ pbl = &srq->hwq.pbl[PBL_LVL_0];
+ req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
+ CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT) |
+ (pbl->pg_size == ROCE_PG_SIZE_4K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
+ pbl->pg_size == ROCE_PG_SIZE_8K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
+ pbl->pg_size == ROCE_PG_SIZE_64K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
+ pbl->pg_size == ROCE_PG_SIZE_2M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
+ pbl->pg_size == ROCE_PG_SIZE_8M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
+ pbl->pg_size == ROCE_PG_SIZE_1G ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
+ req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ req.pd_id = cpu_to_le32(srq->pd->id);
+ req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ goto fail;
+
+ spin_lock_init(&srq->lock);
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.max_elements - 1;
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+
+ srq->id = le32_to_cpu(resp.xid);
+ srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
+ if (srq->threshold)
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+exit:
+ return rc;
+}
+
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ u32 sw_prod, sw_cons, count = 0;
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ if (count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ } else {
+ /* Deferred arming */
+ srq->arm_req = true;
+ }
+
+ return 0;
+}
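The occupancy computation handles producer wrap-around; worked with assumed values max_elements = 1024, sw_prod = 10 and sw_cons = 1000, it yields count = 1024 - 1000 + 10 = 34 outstanding entries. The ARM doorbell is rung immediately only when the queue already holds more than the threshold; otherwise arm_req defers it until a later post crosses the watermark.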
+
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_query_srq req;
+ struct creq_query_srq_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_srq_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ /* Configure the request */
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf)
+ return -ENOMEM;
+ sb = sbuf->sb;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ srq->threshold = le16_to_cpu(sb->srq_limit);
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+
+ return rc;
+}
+
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe, **srqe_ptr;
+ struct sq_sge *hw_sge;
+ u32 sw_prod, sw_cons, count = 0;
+ int i, rc = 0, next;
+
+ spin_lock(&srq_hwq->lock);
+ if (srq->start_idx == srq->last_idx) {
+ dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
+ srq->id);
+ rc = -EINVAL;
+ spin_unlock(&srq_hwq->lock);
+ goto done;
+ }
+ next = srq->start_idx;
+ srq->start_idx = srq->swq[next].next_idx;
+ spin_unlock(&srq_hwq->lock);
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
+ srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
+ memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ /* Calculate wqe_size16 and data_len */
+ for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
+ i < wqe->num_sge; i++, hw_sge++) {
+ hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
+ hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
+ hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
+ }
+ srqe->wqe_type = wqe->type;
+ srqe->flags = wqe->flags;
+ srqe->wqe_size = wqe->num_sge +
+ ((offsetof(typeof(*srqe), data) + 15) >> 4);
+ srqe->wr_id[0] = cpu_to_le32((u32)next);
+ srq->swq[next].wr_id = wqe->wr_id;
+
+ srq_hwq->prod++;
+
+ spin_lock(&srq_hwq->lock);
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ /* Retain srq_hwq->cons for this logic;
+ * the lock is actually only required to
+ * read srq_hwq->cons.
+ */
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
+ if (srq->arm_req && count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ }
+done:
+ return rc;
+}
+
/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
@@ -737,6 +992,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
pbl->pg_size == ROCE_PG_SIZE_1G ?
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
+ } else {
+ /* SRQ */
+ if (qp->srq) {
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
+ req.srq_cid = cpu_to_le32(qp->srq->id);
+ }
}
if (qp->rcq)
@@ -2068,6 +2329,16 @@ done:
return rc;
}
+static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
+{
+ spin_lock(&srq->hwq.lock);
+ srq->swq[srq->last_idx].next_idx = (int)tag;
+ srq->last_idx = (int)tag;
+ srq->swq[srq->last_idx].next_idx = -1;
+ srq->hwq.cons++; /* Support for SRQE counter */
+ spin_unlock(&srq->hwq.lock);
+}
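The wr_id tags are recycled through an index-linked free list: the post path pops from start_idx, while this completion-side helper pushes the freed tag at last_idx and terminates the chain with next_idx = -1. Note that start_idx == last_idx is treated as full by bnxt_qplib_post_srq_recv(), i.e. the final slot is sacrificed to keep the chain non-empty. In sketch form:

	/* pop (bnxt_qplib_post_srq_recv) */
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;

	/* push (bnxt_qplib_release_srqe) */
	srq->swq[srq->last_idx].next_idx = tag;
	srq->last_idx = tag;
	srq->swq[srq->last_idx].next_idx = -1;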
+
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct cq_res_rc *hwcqe,
struct bnxt_qplib_cqe **pcqe,
@@ -2075,6 +2346,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2102,27 +2374,46 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
@@ -2136,6 +2427,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2166,27 +2458,48 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
hwcqe->src_qp_high_srq_or_rq_wr_id) &
CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx %#x exceeded RQ max %#x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
return rc;
@@ -2218,6 +2531,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2256,26 +2570,49 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: SRQ used but not defined??");
+ return -EINVAL;
+ }
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: ix 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
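Both receive-completion paths above now share one shape: test the SRQ flag in the CQE, bounds-check the wr_id index against the queue it actually names, then either recycle the SRQ entry or advance the per-QP RQ consumer. A condensed sketch of that dispatch, assuming a caller that passes in the per-CQE-type SRQ flag (the function name and parameters here are illustrative, not the driver's):

/* Sketch only: condensed form of the SRQ-aware RX completion dispatch. */
static int process_res_cqe(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cqe *cqe,
			   u32 wr_id_idx, u16 srq_flag)
{
	if (cqe->flags & srq_flag) {		/* buffer came from the SRQ */
		struct bnxt_qplib_srq *srq = qp->srq;

		if (!srq || wr_id_idx > srq->hwq.max_elements)
			return -EINVAL;
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx); /* recycle the slot */
	} else {				/* ordinary per-QP RQ */
		struct bnxt_qplib_q *rq = &qp->rq;

		if (wr_id_idx > rq->hwq.max_elements)
			return -EINVAL;
		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
		rq->hwq.cons++;			/* RQ consumes in order */
	}
	return 0;
}

Note the asymmetry: an SRQ completion never touches a consumer index, because SRQ slots are returned through bnxt_qplib_release_srqe() and may come back out of order.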
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index c582d4ec8173..211b27a8f9e2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -39,6 +39,27 @@
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
+struct bnxt_qplib_srq {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ void __iomem *dbr_base;
+ u64 srq_handle;
+ u32 id;
+ u32 max_wqe;
+ u32 max_sge;
+ u32 threshold;
+ bool arm_req;
+ struct bnxt_qplib_cq *cq;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_swq *swq;
+ struct scatterlist *sglist;
+ int start_idx;
+ int last_idx;
+ u32 nmap;
+ u16 eventq_hw_ring_id;
+ spinlock_t lock; /* protect SRQE linked list */
+};
+
struct bnxt_qplib_sge {
u64 addr;
u32 lkey;
@@ -79,6 +100,7 @@ static inline u32 get_psne_idx(u32 val)
struct bnxt_qplib_swq {
u64 wr_id;
+ int next_idx;
u8 type;
u8 flags;
u32 start_psn;
@@ -404,29 +426,27 @@ struct bnxt_qplib_cq {
writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
struct bnxt_qplib_nq {
- struct pci_dev *pdev;
-
- int vector;
- cpumask_t mask;
- int budget;
- bool requested;
- struct tasklet_struct worker;
- struct bnxt_qplib_hwq hwq;
-
- u16 bar_reg;
- u16 bar_reg_off;
- u16 ring_id;
- void __iomem *bar_reg_iomem;
-
- int (*cqn_handler)
- (struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq);
- int (*srqn_handler)
- (struct bnxt_qplib_nq *nq,
- void *srq,
- u8 event);
- struct workqueue_struct *cqn_wq;
- char name[32];
+ struct pci_dev *pdev;
+
+ int vector;
+ cpumask_t mask;
+ int budget;
+ bool requested;
+ struct tasklet_struct worker;
+ struct bnxt_qplib_hwq hwq;
+
+ u16 bar_reg;
+ u16 bar_reg_off;
+ u16 ring_id;
+ void __iomem *bar_reg_iomem;
+
+ int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+ int (*srqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq,
+ u8 event);
+ struct workqueue_struct *cqn_wq;
+ char name[32];
};
struct bnxt_qplib_nq_work {
@@ -441,8 +461,18 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *srq,
+ struct bnxt_qplib_srq *srq,
u8 event));
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
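The start_idx/last_idx fields in bnxt_qplib_srq, together with the new next_idx member of bnxt_qplib_swq, describe a singly linked free list of SRQ entries: since SRQ buffers complete out of order, released slots are chained rather than tracked by a ring consumer. A plausible sketch of the get/put pair under the spinlock the structure documents (the bodies are an assumption, not the driver's verbatim code):

/* Sketch: SRQE slot allocation/release as a linked free list.
 * Bodies are illustrative, not taken from the driver.
 */
static int srqe_get(struct bnxt_qplib_srq *srq)
{
	int idx;

	spin_lock(&srq->lock);
	idx = srq->start_idx;			/* pop the free-list head */
	if (idx != srq->last_idx)
		srq->start_idx = srq->swq[idx].next_idx;
	spin_unlock(&srq->lock);
	return idx;
}

static void srqe_put(struct bnxt_qplib_srq *srq, int idx)
{
	spin_lock(&srq->lock);
	srq->swq[srq->last_idx].next_idx = idx;	/* append at the tail */
	srq->last_idx = idx;
	spin_unlock(&srq->lock);
}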
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index bb5574adf195..8329ec6a7946 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -93,7 +93,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
- opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
+ opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
+ opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode);
@@ -615,7 +616,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *))
+ void *, void *))
{
resource_size_t res_base;
struct cmdq_init init;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2946a7cfae82..6bee6e3636ea 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -167,7 +167,7 @@ struct bnxt_qplib_rcfw {
#define FIRMWARE_TIMED_OUT 3
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *);
+ void *, void *);
u32 seq_num;
/* Bar region info */
@@ -199,9 +199,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)
- (struct bnxt_qplib_rcfw *,
- struct creq_func_event *));
+ int (*aeq_handler)(struct bnxt_qplib_rcfw *,
+ void *aeqe, void *obj));
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
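Loosening aeq_handler to (void *aeqe, void *obj) lets the RCFW layer deliver any async-event format, function events included, without a hard type dependency in the prototype; the handler casts and dispatches. A minimal sketch, assuming the consumer still receives creq_func_event entries (both the cast and the constant checked here are assumptions about how a caller would use this):

/* Sketch: an AEQ handler working through the generic event pointer. */
static int my_aeq_handler(struct bnxt_qplib_rcfw *rcfw, void *aeqe, void *obj)
{
	struct creq_func_event *event = aeqe;	/* assumed event format */

	switch (event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		/* function-level error handling would go here */
		break;
	default:
		break;
	}
	return 0;
}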
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 4e101704e801..ad37d54affcc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -104,13 +104,12 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
if (!sghead) {
for (i = 0; i < pages; i++) {
- pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
- pbl->pg_size,
- &pbl->pg_map_arr[i],
- GFP_KERNEL);
+ pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
- memset(pbl->pg_arr[i], 0, pbl->pg_size);
pbl->pg_count++;
}
} else {
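The __alloc_pbl() change is a pure simplification: dma_zalloc_coherent() is dma_alloc_coherent() with __GFP_ZERO folded in, so the explicit memset() after a successful allocation was redundant. Side by side, as a sketch:

/* Before: allocate, then zero by hand. */
buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
if (!buf)
	goto fail;
memset(buf, 0, size);

/* After: one call, the memory arrives already zeroed. */
buf = dma_zalloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
if (!buf)
	goto fail;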
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9543ce51a28a..c015c1861351 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -64,8 +64,28 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
+static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
+ char *fw_ver)
+{
+ struct cmdq_query_version req;
+ struct creq_query_version_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return;
+ fw_ver[0] = resp.fw_maj;
+ fw_ver[1] = resp.fw_minor;
+ fw_ver[2] = resp.fw_bld;
+ fw_ver[3] = resp.fw_rsvd;
+}
+
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+ struct bnxt_qplib_dev_attr *attr, bool vf)
{
struct cmdq_query_func req;
struct creq_query_func_resp resp;
@@ -95,7 +115,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
- attr->max_qp += 1;
+ if (!vf)
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -133,7 +154,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
attr->max_sgid = le32_to_cpu(sb->max_gid);
- strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));
+ bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
@@ -150,6 +171,38 @@ bail:
return rc;
}
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct cmdq_set_func_resources req;
+ struct creq_set_func_resources_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
+
+ req.number_of_qp = cpu_to_le32(ctx->qpc_count);
+ req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
+ req.number_of_srq = cpu_to_le32(ctx->srqc_count);
+ req.number_of_cq = cpu_to_le32(ctx->cq_count);
+
+ req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
+ req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
+ req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
+ req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
+ req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp,
+ NULL, 0);
+ if (rc) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: Failed to set function resources");
+ }
+ return rc;
+}
+
/* SGID */
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
@@ -604,7 +657,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block)
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_register_mr req;
@@ -615,6 +668,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u32 pg_size;
if (num_pbls) {
+ /* Allocate memory for the non-leaf pages to store buf ptrs.
+ * Non-leaf pages always use the system PAGE_SIZE.
+ */
pg_ptrs = roundup_pow_of_two(num_pbls);
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
if (!pages)
@@ -632,6 +688,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
mr->hwq.max_elements = pages;
+ /* Use system PAGE_SIZE */
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
&mr->hwq.max_elements,
PAGE_SIZE, 0, PAGE_SIZE,
@@ -652,18 +709,22 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
/* Configure the request */
if (mr->hwq.level == PBL_LVL_MAX) {
+ /* No PBL provided, just use system PAGE_SIZE */
level = 0;
req.pbl = 0;
pg_size = PAGE_SIZE;
} else {
level = mr->hwq.level + 1;
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
- pg_size = mr->hwq.pbl[PBL_LVL_0].pg_size;
}
+ pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
((ilog2(pg_size) <<
CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
+ req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
req.access = (mr->flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
@@ -729,3 +790,73 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
0);
return 0;
}
+
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats)
+{
+ struct cmdq_query_roce_stats req;
+ struct creq_query_roce_stats_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_roce_stats_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);
+
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: SP: QUERY_ROCE_STATS alloc side buffer failed");
+ return -ENOMEM;
+ }
+
+ sb = sbuf->sb;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+ /* Extract the context from the side buffer */
+ stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
+ stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
+ stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
+ stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
+ stats->missing_resp = le64_to_cpu(sb->missing_resp);
+ stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
+ stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
+ stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
+ stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
+ stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
+ stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
+ stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
+ stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
+ stats->dup_req = le64_to_cpu(sb->dup_req);
+ stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
+ stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
+ stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
+ stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
+ stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
+ stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
+ stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
+ stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
+ stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
+ stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
+ stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
+ stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
+ stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
+ stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
+ stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
+ stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
+ stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
+ stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
+ stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
+ stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
+ stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
+ stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
+ stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
+ stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
+ stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
+}
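With bnxt_qplib_query_version() in place, fw_ver is four raw bytes (major, minor, build, reserved) from the QUERY_VERSION response rather than a hard-coded string, so anything that needs printable output must format it. A minimal sketch (the helper name is hypothetical):

/* Sketch: render the 4-byte firmware version as "maj.min.bld.rsvd". */
static void format_fw_ver(const u8 fw_ver[4], char *buf, size_t len)
{
	snprintf(buf, len, "%d.%d.%d.%d",
		 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3]);
}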
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 11322582f5e4..9d3e8b994945 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -45,7 +45,8 @@
#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
struct bnxt_qplib_dev_attr {
- char fw_ver[32];
+#define FW_VER_ARR_LEN 4
+ u8 fw_ver[FW_VER_ARR_LEN];
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
@@ -127,6 +128,85 @@ struct bnxt_qplib_frpl {
#define BNXT_QPLIB_ACCESS_ZERO_BASED BIT(5)
#define BNXT_QPLIB_ACCESS_ON_DEMAND BIT(6)
+struct bnxt_qplib_roce_stats {
+ u64 to_retransmits;
+ u64 seq_err_naks_rcvd;
+ /* seq_err_naks_rcvd is 64 b */
+ u64 max_retry_exceeded;
+ /* max_retry_exceeded is 64 b */
+ u64 rnr_naks_rcvd;
+ /* rnr_naks_rcvd is 64 b */
+ u64 missing_resp;
+ u64 unrecoverable_err;
+ /* unrecoverable_err is 64 b */
+ u64 bad_resp_err;
+ /* bad_resp_err is 64 b */
+ u64 local_qp_op_err;
+ /* local_qp_op_err is 64 b */
+ u64 local_protection_err;
+ /* local_protection_err is 64 b */
+ u64 mem_mgmt_op_err;
+ /* mem_mgmt_op_err is 64 b */
+ u64 remote_invalid_req_err;
+ /* remote_invalid_req_err is 64 b */
+ u64 remote_access_err;
+ /* remote_access_err is 64 b */
+ u64 remote_op_err;
+ /* remote_op_err is 64 b */
+ u64 dup_req;
+ /* dup_req is 64 b */
+ u64 res_exceed_max;
+ /* res_exceed_max is 64 b */
+ u64 res_length_mismatch;
+ /* res_length_mismatch is 64 b */
+ u64 res_exceeds_wqe;
+ /* res_exceeds_wqe is 64 b */
+ u64 res_opcode_err;
+ /* res_opcode_err is 64 b */
+ u64 res_rx_invalid_rkey;
+ /* res_rx_invalid_rkey is 64 b */
+ u64 res_rx_domain_err;
+ /* res_rx_domain_err is 64 b */
+ u64 res_rx_no_perm;
+ /* res_rx_no_perm is 64 b */
+ u64 res_rx_range_err;
+ /* res_rx_range_err is 64 b */
+ u64 res_tx_invalid_rkey;
+ /* res_tx_invalid_rkey is 64 b */
+ u64 res_tx_domain_err;
+ /* res_tx_domain_err is 64 b */
+ u64 res_tx_no_perm;
+ /* res_tx_no_perm is 64 b */
+ u64 res_tx_range_err;
+ /* res_tx_range_err is 64 b */
+ u64 res_irrq_oflow;
+ /* res_irrq_oflow is 64 b */
+ u64 res_unsup_opcode;
+ /* res_unsup_opcode is 64 b */
+ u64 res_unaligned_atomic;
+ /* res_unaligned_atomic is 64 b */
+ u64 res_rem_inv_err;
+ /* res_rem_inv_err is 64 b */
+ u64 res_mem_error;
+ /* res_mem_error is 64 b */
+ u64 res_srq_err;
+ /* res_srq_err is 64 b */
+ u64 res_cmp_err;
+ /* res_cmp_err is 64 b */
+ u64 res_invalid_dup_rkey;
+ /* res_invalid_dup_rkey is 64 b */
+ u64 res_wqe_format_err;
+ /* res_wqe_format_err is 64 b */
+ u64 res_cq_load_err;
+ /* res_cq_load_err is 64 b */
+ u64 res_srq_load_err;
+ /* res_srq_load_err is 64 b */
+ u64 res_tx_pci_err;
+ /* res_tx_pci_err is 64 b */
+ u64 res_rx_pci_err;
+ /* res_rx_pci_err is 64 b */
+};
+
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
@@ -147,7 +227,10 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+ struct bnxt_qplib_dev_attr *attr, bool vf);
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
@@ -155,7 +238,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block);
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
@@ -164,4 +247,6 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl);
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats);
#endif /* __BNXT_QPLIB_SP_H__*/
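One design note on the stats plumbing: the counters in creq_query_roce_stats_resp_sb and the u64 fields of bnxt_qplib_roce_stats are declared in the same order, so the long run of per-field le64_to_cpu() copies in bnxt_qplib_get_roce_stats() could in principle collapse to a loop. Treating both structs as arrays, as sketched below, is an assumption that the two layouts stay in lockstep:

/* Sketch: bulk copy of the ordered 64-bit counters. */
const __le64 *src = &sb->to_retransmits;
u64 *dst = &stats->to_retransmits;
size_t i;
size_t n = (offsetof(struct bnxt_qplib_roce_stats, res_rx_pci_err) -
	    offsetof(struct bnxt_qplib_roce_stats, to_retransmits)) /
	   sizeof(u64) + 1;

for (i = 0; i < n; i++)
	dst[i] = le64_to_cpu(src[i]);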
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index c3cba6063a03..2d7ea096a247 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -954,6 +954,7 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL
#define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL
#define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -1383,8 +1384,20 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL
#define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL
#define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL
+ #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED1 0x80UL
u8 access;
#define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL
@@ -1392,7 +1405,21 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL
#define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL
#define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL
- __le16 unused_1;
+ __le16 log2_pbl_pg_size;
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_REGISTER_MR_UNUSED11_SFT 5
__le32 key;
__le64 pbl;
__le64 va;
@@ -1799,6 +1826,16 @@ struct cmdq_set_func_resources {
u8 resp_size;
u8 reserved8;
__le64 resp_addr;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
};
/* Read hardware resource context command (24 bytes) */
@@ -2013,6 +2050,20 @@ struct creq_modify_qp_resp {
__le16 reserved48[3];
};
+/* cmdq_query_roce_stats (size:128b/16B) */
+struct cmdq_query_roce_stats {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST \
+ CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
/* Query QP command response (16 bytes) */
struct creq_query_qp_resp {
u8 type;
@@ -2783,6 +2834,80 @@ struct creq_query_cc_resp_sb {
__le64 reserved64_1;
};
+/* creq_query_roce_stats_resp (size:128b/16B) */
+struct creq_query_roce_stats_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_resp_sb (size:2624b/328B) */
+struct creq_query_roce_stats_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le32 num_counters;
+ __le32 rsvd1;
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+};
+
/* QP error notification event (16 bytes) */
struct creq_qp_error_notification {
u8 type;
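The new page-size encodings are simply ilog2() of the size placed in the command field, which is why 4K maps to 0xc (2^12 bytes), 2M to 0x15 (2^21) and 1G to 0x1e (2^30); LOG2_PG_SIZE is additionally shifted left by 2 to sit above the level bits, so PG_4K becomes (0xcUL << 2). The register-MR path composes the field essentially as in the bnxt_qplib_reg_mr() hunk above, sketched here:

/* Sketch: pack level and log2(data page size) into log2_pg_size_lvl.
 * pg_size is the data-buffer page size; PBL pages stay at PAGE_SIZE.
 */
u32 pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;

req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
		       ((ilog2(pg_size) << CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
			CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);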
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 21db3b48a617..4cf17c650c36 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -257,8 +257,8 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
ep->emss);
}
@@ -2733,9 +2733,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status, neg_adv_str(req->status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -3567,8 +3566,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- pr_info("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_debug("ignoring disconnect ep %p state %u\n",
+ ep, ep->com.state);
break;
default:
WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
@@ -4097,9 +4096,15 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- ret = work_handlers[opcode](dev, skb);
- if (!ret)
+ if (opcode >= ARRAY_SIZE(work_handlers) ||
+ !work_handlers[opcode]) {
+ pr_err("No handler for opcode 0x%x.\n", opcode);
kfree_skb(skb);
+ } else {
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
process_timedout_eps();
}
}
@@ -4201,8 +4206,8 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status,
neg_adv_str(req->status));
goto out;
}
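The process_work() change closes a hole where an unexpected opcode would index past work_handlers[] or call through a NULL slot. The defensive shape generalizes to any opcode-indexed dispatch table: validate the index and the slot before the indirect call, and consume the message either way. A sketch with illustrative names:

/* Sketch: safe dispatch through an opcode-indexed handler table. */
typedef int (*handler_fn)(struct c4iw_dev *dev, struct sk_buff *skb);

static void dispatch(handler_fn *handlers, size_t n, u8 opcode,
		     struct c4iw_dev *dev, struct sk_buff *skb)
{
	if (opcode >= n || !handlers[opcode]) {
		pr_err("No handler for opcode 0x%x.\n", opcode);
		kfree_skb(skb);		/* drop, don't leak the skb */
		return;
	}
	if (!handlers[opcode](dev, skb))	/* zero: handler didn't keep it */
		kfree_skb(skb);
}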
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index af77d128d242..7a9d0de89d6a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(c4iw_wr_log_size_order,
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-struct workqueue_struct *reg_workq;
+static struct workqueue_struct *reg_workq;
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
@@ -108,19 +108,19 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
(wq->rdev->wr_log_size - 1);
le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
- getnstimeofday(&le.poll_host_ts);
+ le.poll_host_time = ktime_get();
le.valid = 1;
le.cqe_sge_ts = CQE_TS(cqe);
if (SQ_TYPE(cqe)) {
le.qid = wq->sq.qid;
le.opcode = CQE_OPCODE(cqe);
- le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+ le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
le.wr_id = CQE_WRID_SQ_IDX(cqe);
} else {
le.qid = wq->rq.qid;
le.opcode = FW_RI_RECEIVE;
- le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+ le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
le.wr_id = CQE_WRID_MSN(cqe);
}
@@ -130,9 +130,9 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
static int wr_log_show(struct seq_file *seq, void *v)
{
struct c4iw_dev *dev = seq->private;
- struct timespec prev_ts = {0, 0};
+ ktime_t prev_time;
struct wr_log_entry *lep;
- int prev_ts_set = 0;
+ int prev_time_set = 0;
int idx, end;
#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
@@ -145,33 +145,29 @@ static int wr_log_show(struct seq_file *seq, void *v)
lep = &dev->rdev.wr_log[idx];
while (idx != end) {
if (lep->valid) {
- if (!prev_ts_set) {
- prev_ts_set = 1;
- prev_ts = lep->poll_host_ts;
+ if (!prev_time_set) {
+ prev_time_set = 1;
+ prev_time = lep->poll_host_time;
}
- seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
- "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+ seq_printf(seq, "%04u: nsec %llu qid %u opcode "
+ "%u %s 0x%x host_wr_delta nsec %llu "
"post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
"poll_sge_ts 0x%llx post_poll_delta_ns %llu "
"cqe_poll_delta_ns %llu\n",
idx,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ prev_time)),
lep->qid, lep->opcode,
lep->opcode == FW_RI_RECEIVE ?
"msn" : "wrid",
lep->wr_id,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ lep->post_host_time)),
lep->post_sge_ts, lep->cqe_sge_ts,
lep->poll_sge_ts,
ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
- prev_ts = lep->poll_host_ts;
+ prev_time = lep->poll_host_time;
}
idx++;
if (idx > (dev->rdev.wr_log_size - 1))
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index a252d5c40ae3..3e9d8b277ab9 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -236,7 +236,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
- pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
+ pr_debug("unknown cqid 0x%x\n", qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 65dd3726ca02..cc929002c05e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -153,8 +153,8 @@ struct c4iw_hw_queue {
};
struct wr_log_entry {
- struct timespec post_host_ts;
- struct timespec poll_host_ts;
+ ktime_t post_host_time;
+ ktime_t poll_host_time;
u64 post_sge_ts;
u64 cqe_sge_ts;
u64 poll_sge_ts;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d5c92fc520d6..de77b6027d69 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1042,7 +1042,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (c4iw_wr_log) {
swsqe->sge_ts = cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(&swsqe->host_ts);
+ swsqe->host_time = ktime_get();
}
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1117,8 +1117,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(
- &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
+ ktime_get();
}
wqe->recv.opcode = FW_RI_RECV_WR;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 79e8ee12c391..8369c7c8de83 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -277,7 +277,7 @@ struct t4_swsqe {
int signaled;
u16 idx;
int flushed;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
@@ -318,7 +318,7 @@ struct t4_sq {
struct t4_swrqe {
u64 wr_id;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
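The cxgb4 timestamp changes swap struct timespec plus getnstimeofday()/timespec_sub() for ktime_t, which is y2038-safe and turns delta computation into a single 64-bit subtraction instead of separate second/nanosecond bookkeeping; ktime_get() is also monotonic, so the logged deltas can no longer jump with wall-clock adjustments. The idiom, reduced to its core:

/* Sketch: monotonic delta with ktime_t instead of struct timespec. */
ktime_t post_time, poll_time;
s64 delta_ns;

post_time = ktime_get();		/* was: getnstimeofday(&ts) */
/* ... work completes some time later ... */
poll_time = ktime_get();
delta_ns = ktime_to_ns(ktime_sub(poll_time, post_time));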
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 4f057e8ffe50..6660f920f42e 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6518,11 +6518,12 @@ static void _dc_start(struct hfi1_devdata *dd)
if (!dd->dc_shutdown)
return;
- /*
- * Take the 8051 out of reset, wait until 8051 is ready, and set host
- * version bit.
- */
- release_and_wait_ready_8051_firmware(dd);
+ /* Take the 8051 out of reset */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+ /* Wait until 8051 is ready */
+ if (wait_fm_ready(dd, TIMEOUT_8051_START))
+ dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
+ __func__);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
@@ -8564,23 +8565,27 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
}
/*
- * If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
- * will still continue executing.
- *
* Returns:
* < 0 = Linux error, not able to get access
* > 0 = 8051 command RETURN_CODE
*/
-static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
{
u64 reg, completed;
int return_code;
unsigned long timeout;
- lockdep_assert_held(&dd->dc8051_lock);
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
+ mutex_lock(&dd->dc8051_lock);
+
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
/*
* If an 8051 host command timed out previously, then the 8051 is
* stuck.
@@ -8681,29 +8686,6 @@ static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
fail:
- return return_code;
-}
-
-/*
- * Returns:
- * < 0 = Linux error, not able to get access
- * > 0 = 8051 command RETURN_CODE
- */
-static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- /* We can't send any commands to the 8051 if it's in reset */
- if (dd->dc_shutdown) {
- return_code = -ENODEV;
- goto fail;
- }
-
- return_code = _do_8051_command(dd, type, in_data, out_data);
-
-fail:
mutex_unlock(&dd->dc8051_lock);
return return_code;
}
@@ -8713,17 +8695,16 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
-static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
{
u64 data;
int ret;
- lockdep_assert_held(&dd->dc8051_lock);
data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
| (u64)config_data << LOAD_DATA_DATA_SHIFT;
- ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
"load 8051 config: field id %d, lane %d, err %d\n",
@@ -8732,18 +8713,6 @@ static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
return ret;
}
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- return_code = _load_8051_config(dd, field_id, lane_id, config_data);
- mutex_unlock(&dd->dc8051_lock);
-
- return return_code;
-}
-
/*
* Read the 8051 firmware "registers". Use the RAM directly. Always
* set the result, even on error.
@@ -8859,14 +8828,13 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
u32 frame;
u32 mask;
- lockdep_assert_held(&dd->dc8051_lock);
mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
/* Clear, then set field */
frame &= ~mask;
frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
- return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
- frame);
+ return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
@@ -9270,6 +9238,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ goto set_local_link_attributes_fail;
+ }
+
/*
* DC supports continuous updates.
*/
@@ -14944,9 +14920,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
num_vls > HFI1_MAX_VLS_SUPPORTED) {
- hfi1_early_err(&pdev->dev,
- "Invalid num_vls %u, using %u VLs\n",
- num_vls, HFI1_MAX_VLS_SUPPORTED);
+ dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
+ num_vls, HFI1_MAX_VLS_SUPPORTED);
num_vls = HFI1_MAX_VLS_SUPPORTED;
}
ppd->vls_supported = num_vls;
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 133e313feca4..21fca8ec5076 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -508,6 +508,7 @@
#define DOWN_REMOTE_REASON_SHIFT 16
#define DOWN_REMOTE_REASON_MASK 0xff
+#define HOST_INTERFACE_VERSION 1
#define HOST_INTERFACE_VERSION_SHIFT 16
#define HOST_INTERFACE_VERSION_MASK 0xff
@@ -713,7 +714,6 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch);
int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
void read_guid(struct hfi1_devdata *dd);
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
u8 neigh_reason, u8 rem_reason);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 4f65ac671044..067b29f35f21 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -159,22 +159,6 @@ static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
-const char *get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
- return iname;
-}
-
-const char *get_card_name(struct rvt_dev_info *rdi)
-{
- struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
- struct hfi1_devdata *dd = container_of(ibdev,
- struct hfi1_devdata, verbs_dev);
- return get_unit_name(dd->unit);
-}
-
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1df7da47f431..bd6f03cc5ee0 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -74,7 +74,7 @@
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
static u64 kvirt_to_phys(void *addr);
@@ -102,8 +102,8 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
struct hfi1_user_info *uinfo,
struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
-static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
-static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
unsigned long arg);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
@@ -607,10 +607,10 @@ static int vma_fault(struct vm_fault *vmf)
return 0;
}
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
struct hfi1_ctxtdata *uctxt;
- unsigned pollflag;
+ __poll_t pollflag;
uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
if (!uctxt)
@@ -1425,13 +1425,13 @@ static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
return ret;
}
-static unsigned int poll_urgent(struct file *fp,
+static __poll_t poll_urgent(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
@@ -1448,13 +1448,13 @@ static unsigned int poll_urgent(struct file *fp,
return pollflag;
}
-static unsigned int poll_next(struct file *fp,
+static __poll_t poll_next(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
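The poll-method conversions above move from unsigned int to __poll_t, a sparse-annotated bitwise type, so mixing the mask with plain integers or the wrong flag constants becomes a static-checker error rather than a silent bug. The resulting shape, as a sketch (the readiness predicate is hypothetical):

/* Sketch: a poll method built around the typed __poll_t mask. */
static __poll_t example_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	__poll_t mask = 0;

	poll_wait(fp, &fd->uctxt->wait, pt);
	if (rx_data_ready(fd->uctxt))		/* hypothetical predicate */
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}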
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 98868df78a7e..2b57ba70ddd6 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -68,7 +68,6 @@
#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
-#define HOST_INTERFACE_VERSION 1
MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
@@ -976,46 +975,6 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
}
/*
- * Clear all reset bits, releasing the 8051.
- * Wait for firmware to be ready to accept host requests.
- * Then, set host version bit.
- *
- * This function executes even if the 8051 is in reset mode when
- * dd->dc_shutdown == 1.
- *
- * Expects dd->dc8051_lock to be held.
- */
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
-{
- int ret;
-
- lockdep_assert_held(&dd->dc8051_lock);
- /* clear all reset bits, releasing the 8051 */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
- /*
- * Wait for firmware to be ready to accept host
- * requests.
- */
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) {
- dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
- get_firmware_state(dd));
- return ret;
- }
-
- ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to set host interface version, return 0x%x\n",
- ret);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
* Load the 8051 firmware.
*/
static int load_8051_firmware(struct hfi1_devdata *dd,
@@ -1080,22 +1039,31 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
if (ret)
return ret;
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
/*
- * Clear all reset bits, releasing the 8051.
* DC reset step 5. Wait for firmware to be ready to accept host
* requests.
- * Then, set host version bit.
*/
- mutex_lock(&dd->dc8051_lock);
- ret = release_and_wait_ready_8051_firmware(dd);
- mutex_unlock(&dd->dc8051_lock);
- if (ret)
- return ret;
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) { /* timed out */
+ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
+ get_firmware_state(dd));
+ return -ETIMEDOUT;
+ }
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
(int)ver_major, (int)ver_minor, (int)ver_patch);
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 8ce9118d4a7f..b42c22292597 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1623,7 +1623,7 @@ static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
* the 'error info' for this failure.
*/
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
- u16 slid)
+ u32 slid)
{
struct hfi1_devdata *dd = ppd->dd;
@@ -1971,8 +1971,6 @@ int get_platform_config_field(struct hfi1_devdata *dd,
table_type, int table_index, int field_index,
u32 *data, u32 len);
-const char *get_unit_name(int unit);
-const char *get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -2122,39 +2120,42 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err_ratelimited(dd, fmt, ...) \
dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_warn_ratelimited(dd, fmt, ...) \
dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_info(dd, fmt, ...) \
dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_info_ratelimited(dd, fmt, ...) \
dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_dbg(dd, fmt, ...) \
dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define hfi1_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
- get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 8e3b3e7d829a..9b128268fb28 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1272,6 +1272,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+
/*
* Initialize all locks for the device. This needs to be as early as
* possible so locks are usable.
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index cf8dba34fe30..34547a48a445 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4348,11 +4348,7 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
*/
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
return 0;
- /*
- * On OPA devices it is okay to lose the upper 16 bits of LID as this
- * information is obtained elsewhere. Mask off the upper 16 bits.
- */
- ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
+ ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
return 1;
}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4b01ccd895b4..5507910e8b8a 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -556,6 +556,8 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
struct sdma_engine *sde;
struct send_context *send_context;
struct rvt_ack_entry *e = NULL;
+ struct rvt_srq *srq = qp->ibqp.srq ?
+ ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;
sde = qp_to_sdma_engine(qp, priv->s_sc);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
@@ -563,7 +565,7 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
if (qp->s_ack_queue)
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
seq_printf(s,
- "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x\n",
+ "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
iter->n,
qp_idle(qp) ? "I" : "B",
qp->ibqp.qp_num,
@@ -610,7 +612,11 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
/* ack queue information */
e ? e->opcode : 0,
e ? e->psn : 0,
- e ? e->lpsn : 0);
+ e ? e->lpsn : 0,
+ qp->r_min_rnr_timer,
+ srq ? "SRQ" : "RQ",
+ srq ? srq->rq.size : qp->r_rq.size
+ );
}
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index af5f7936f7e5..14cc212a21c7 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
@@ -843,11 +841,11 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
/* Convert dwords to flits */
len = (*hwords + *nwords) >> 1;
- hfi1_make_16b_hdr(hdr,
- ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr),
+ hfi1_make_16b_hdr(hdr, ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1)),
opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
- 16B),
- len, pkey, becn, 0, l4, sc5);
+ 16B), len, pkey, becn, 0, l4, sc5);
bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
bth0 |= extra_bytes << 20;
@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
}
/* Ensure s_rdma_ack_cnt changes are committed */
- smp_read_barrier_depends();
if (qp->s_rdma_ack_cnt) {
hfi1_queue_rc_ack(qp, is_fecn);
return;
@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 2c7fc6e331ea..13b994738f41 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 31c8f89b5fc8..61c130dbed10 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
- smp_read_barrier_depends(); /* see sdma_update_tail() */
return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 991bbee04821..132b63e787d1 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index beb5091eccca..deb184574395 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a38785e224cc..b8776a362a91 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1486,7 +1486,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
- mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
+ mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
/*
* sm_lid of 0xFFFF needs special handling so that it can
@@ -1844,7 +1844,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
struct hfi1_ibport *ibp = &ppd->ibport_data;
unsigned i;
int ret;
- size_t lcpysz = IB_DEVICE_NAME_MAX;
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
@@ -1872,8 +1871,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
*/
if (!ib_hfi1_sys_image_guid)
ib_hfi1_sys_image_guid = ibdev->node_guid;
- lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
- strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
ibdev->owner = THIS_MODULE;
ibdev->phys_port_cnt = dd->num_pports;
ibdev->dev.parent = &dd->pcidev->dev;
@@ -1893,7 +1890,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index ff426a625e13..97bf2cd1cacb 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
+hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 1085cb249bc1..9ebe839d8b24 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -103,6 +103,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
context->out_param = out_param;
complete(&context->done);
}
+EXPORT_SYMBOL_GPL(hns_roce_cmd_event);
/* this should be called with "use_events" */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
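hns_roce_cmd_event() is exported here so that the separately built hw backend modules can complete mailbox waiters from their own interrupt paths. A minimal sketch of the completion handshake it implements, using hypothetical names (mbox_ctx, mbox_event, mbox_wait):

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	struct mbox_ctx {
		struct completion done;	/* set up with init_completion() */
		u64 out_param;
		u8 status;
	};

	/* event/IRQ side: record the result and wake the waiter */
	static void mbox_event(struct mbox_ctx *ctx, u8 status, u64 out_param)
	{
		ctx->status = status;
		ctx->out_param = out_param;
		complete(&ctx->done);
	}

	/* submitting side: park until the event handler fires or we time out */
	static int mbox_wait(struct mbox_ctx *ctx, unsigned long timeout)
	{
		if (!wait_for_completion_timeout(&ctx->done, timeout))
			return -ETIMEDOUT;
		return ctx->status ? -EIO : 0;
	}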
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index b1c94223c28b..9549ae51a0dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -88,6 +88,16 @@ enum {
HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+ HNS_ROCE_CMD_QUERY_AEQC = 0x82,
+ HNS_ROCE_CMD_DESTROY_AEQC = 0x83,
+ HNS_ROCE_CMD_CREATE_CEQC = 0x90,
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
};
enum {
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 7ecb7a4147a8..dd67fafd0c40 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -376,6 +376,12 @@
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
+#define ROCEE_VF_MB_CFG0_REG 0x40
+#define ROCEE_VF_MB_STATUS_REG 0x58
+
+#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
+#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
+
#define ROCEE_VF_SMAC_CFG0_REG 0x12000
#define ROCEE_VF_SMAC_CFG1_REG 0x12004
@@ -385,4 +391,9 @@
#define ROCEE_VF_SGID_CFG3_REG 0x1000c
#define ROCEE_VF_SGID_CFG4_REG 0x10010
+#define ROCEE_VF_ABN_INT_CFG_REG 0x13000
+#define ROCEE_VF_ABN_INT_ST_REG 0x13004
+#define ROCEE_VF_ABN_INT_EN_REG 0x13008
+#define ROCEE_VF_EVENT_INT_EN_REG 0x1300c
+
#endif /* _HNS_ROCE_COMMON_H */
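The new ROCEE_VF_* mailbox and EQ doorbell offsets are consumed through the driver's MMIO helpers. A sketch of the usual accessor pattern (hr_reg_read/hr_reg_write are hypothetical wrappers around the standard readl()/writel(); base is the ioremap()ed register window):

	#include <linux/io.h>
	#include <linux/types.h>

	static inline u32 hr_reg_read(void __iomem *base, u32 off)
	{
		return readl(base + off);
	}

	static inline void hr_reg_write(void __iomem *base, u32 off, u32 val)
	{
		writel(val, base + off);
	}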
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 2111b57a3489..bccc9b54c9ce 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -196,15 +196,14 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
- if (hr_dev->eq_table.eq) {
- /* Waiting interrupt process procedure carried out */
- synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
- /* wait for all interrupt processed */
- if (atomic_dec_and_test(&hr_cq->refcount))
- complete(&hr_cq->free);
- wait_for_completion(&hr_cq->free);
- }
+
+ /* Wait for any in-flight interrupt handler to finish */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* wait until all interrupt references have been released */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
@@ -460,6 +459,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
++cq->arm_sn;
cq->comp(cq);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_completion);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
@@ -482,6 +482,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_event);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
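The hns_roce_free_cq() hunk above drops the eq_table.eq NULL check: with EQ setup now behind the hw ops (see hns_roce_device.h below), the interrupt machinery always exists, so the quiesce sequence runs unconditionally. A sketch of the refcount-plus-completion teardown idiom, assuming a hypothetical object with the same fields:

	#include <linux/atomic.h>
	#include <linux/completion.h>
	#include <linux/interrupt.h>
	#include <linux/slab.h>

	struct my_obj {
		atomic_t refcount;	/* initialised to 1 at create time */
		struct completion free;
	};

	/* destroy side: drop our reference, then wait for event-side users */
	static void obj_destroy(struct my_obj *obj, unsigned int irq)
	{
		synchronize_irq(irq);			/* handler no longer running */
		if (atomic_dec_and_test(&obj->refcount))
			complete(&obj->free);		/* we held the last reference */
		wait_for_completion(&obj->free);	/* else wait for the last user */
		kfree(obj);
	}

	/* event side: mirror of the tail of hns_roce_cq_event() above */
	static void obj_event_done(struct my_obj *obj)
	{
		if (atomic_dec_and_test(&obj->refcount))
			complete(&obj->free);
	}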
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index b154ce40cded..42c3b5a2d441 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,12 +62,16 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
-#define HNS_ROCE_MAX_IRQ_NUM 34
+#define HNS_ROCE_MAX_IRQ_NUM 128
-#define HNS_ROCE_COMP_VEC_NUM 32
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
-#define HNS_ROCE_AEQE_VEC_NUM 1
-#define HNS_ROCE_AEQE_OF_VEC_NUM 1
+#define HNS_ROCE_CEQ 0
+#define HNS_ROCE_AEQ 1
+
+#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
+#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28
@@ -130,6 +134,7 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
HNS_ROCE_EVENT_TYPE_MB = 0x13,
HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
+ HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */
@@ -173,6 +178,7 @@ enum {
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+ HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
};
enum hns_roce_mtt_type {
@@ -441,6 +447,21 @@ struct hns_roce_cmd_mailbox {
struct hns_roce_dev;
+struct hns_roce_rinl_sge {
+ void *addr;
+ u32 len;
+};
+
+struct hns_roce_rinl_wqe {
+ struct hns_roce_rinl_sge *sg_list;
+ u32 sge_cnt;
+};
+
+struct hns_roce_rinl_buf {
+ struct hns_roce_rinl_wqe *wqe_list;
+ u32 wqe_cnt;
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
@@ -462,7 +483,9 @@ struct hns_roce_qp {
u8 resp_depth;
u8 state;
u32 access_flags;
+ u32 atomic_rd_en;
u32 pkey_index;
+ u32 qkey;
void (*event)(struct hns_roce_qp *,
enum hns_roce_event);
unsigned long qpn;
@@ -472,6 +495,8 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+
+ struct hns_roce_rinl_buf rq_inl_buf;
};
struct hns_roce_sqp {
@@ -485,6 +510,45 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
+enum {
+ HNS_ROCE_EQ_STAT_INVALID = 0,
+ HNS_ROCE_EQ_STAT_VALID = 2,
+};
+
+struct hns_roce_ceqe {
+ u32 comp;
+};
+
+struct hns_roce_aeqe {
+ u32 asyn;
+ union {
+ struct {
+ u32 qp;
+ u32 rsv0;
+ u32 rsv1;
+ } qp_event;
+
+ struct {
+ u32 cq;
+ u32 rsv0;
+ u32 rsv1;
+ } cq_event;
+
+ struct {
+ u32 ceqe;
+ u32 rsv0;
+ u32 rsv1;
+ } ce_event;
+
+ struct {
+ __le64 out_param;
+ __le16 token;
+ u8 status;
+ u8 rsv0;
+ } __packed cmd;
+ } event;
+};
+
struct hns_roce_eq {
struct hns_roce_dev *hr_dev;
void __iomem *doorbell;
@@ -498,11 +562,31 @@ struct hns_roce_eq {
int log_page_size;
int cons_index;
struct hns_roce_buf_list *buf_list;
+ int over_ignore;
+ int coalesce;
+ int arm_st;
+ u64 eqe_ba;
+ int eqe_ba_pg_sz;
+ int eqe_buf_pg_sz;
+ int hop_num;
+ u64 *bt_l0; /* Base address table for L0 */
+ u64 **bt_l1; /* Base address table for L1 */
+ u64 **buf;
+ dma_addr_t l0_dma;
+ dma_addr_t *l1_dma;
+ dma_addr_t *buf_dma;
+ u32 l0_last_num; /* L0 last chunk num */
+ u32 l1_last_num; /* L1 last chunk num */
+ int eq_max_cnt;
+ int eq_period;
+ int shift;
+ dma_addr_t cur_eqe_ba;
+ dma_addr_t nxt_eqe_ba;
};
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
- void __iomem **eqc_base;
+ void __iomem **eqc_base; /* only for hw v1 */
};
struct hns_roce_caps {
@@ -528,7 +612,7 @@ struct hns_roce_caps {
u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
- int num_comp_vectors; /* 32 ceq */
+ int num_comp_vectors;
int num_other_vectors;
int num_mtpts;
u32 num_mtt_segs;
@@ -550,7 +634,7 @@ struct hns_roce_caps {
u32 pbl_buf_pg_sz;
u32 pbl_hop_num;
int aeqe_depth;
- int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
+ int ceqe_depth;
enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 srqc_bt_num;
@@ -574,6 +658,9 @@ struct hns_roce_caps {
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
+ u32 eqe_ba_pg_sz;
+ u32 eqe_buf_pg_sz;
+ u32 eqe_hop_num;
u32 chunk_sz; /* chunk size in non multihop mode*/
u64 flags;
};
@@ -623,6 +710,8 @@ struct hns_roce_hw {
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ int (*init_eq)(struct hns_roce_dev *hr_dev);
+ void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
};
struct hns_roce_dev {
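The two new init_eq/cleanup_eq callbacks let each hardware revision bring its own EQ implementation while common probe code stays generic. A sketch of the assumed call sites (hr_setup_eqs/hr_teardown_eqs are hypothetical names):

	static int hr_setup_eqs(struct hns_roce_dev *hr_dev)
	{
		/* v1 wires .init_eq to hns_roce_v1_init_eq_table() (added
		 * below in hns_roce_hw_v1.c); v2 supplies its own. */
		return hr_dev->hw->init_eq(hr_dev);
	}

	static void hr_teardown_eqs(struct hns_roce_dev *hr_dev)
	{
		hr_dev->hw->cleanup_eq(hr_dev);
	}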
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
deleted file mode 100644
index d184431e2bf5..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ /dev/null
@@ -1,759 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include "hns_roce_common.h"
-#include "hns_roce_device.h"
-#include "hns_roce_eq.h"
-
-static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
-{
- roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
- (req_not << eq->log_entries), eq->doorbell);
- /* Memory barrier */
- mb();
-}
-
-static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE;
-
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);
-
- return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
- !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
-}
-
-static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SL_ERROR:
- dev_warn(dev, "QP %d, SL error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_PORT_ERROR:
- dev_warn(dev, "QP %d, port error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Access Violation Work Queue Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- int phy_port;
- int qpn;
-
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
- if (qpn <= 1)
- qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
- "QP %d, phy_port %d.\n", qpn, phy_port);
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqn;
-
- cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
-static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
- dev_warn(dev, "SDB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
- dev_warn(dev, "SDB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
- dev_warn(dev, "ODB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
- dev_warn(dev, "ODB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- default:
- break;
- }
-}
-
-static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_aeqe *aeqe;
- int aeqes_found = 0;
- int event_type;
-
- while ((aeqe = next_aeqe_sw(eq))) {
- dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- /* Memory barrier */
- rmb();
-
- event_type = roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "PATH MIG not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "COMMUNICATION established\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "SQ DRAINED not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "PATH MIG failed\n");
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
- dev_warn(dev, "SRQ not support!\n");
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
- dev_warn(dev, "port change.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_MB:
- hns_roce_cmd_event(hr_dev,
- le16_to_cpu(aeqe->event.cmd.token),
- aeqe->event.cmd.status,
- le64_to_cpu(aeqe->event.cmd.out_param
- ));
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- hns_roce_db_overflow_handle(hr_dev, aeqe);
- break;
- case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ 0x%lx overflow.\n",
- roce_get_field(aeqe->event.ce_event.ceqe,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
- break;
- default:
- dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
- event_type, eq->eqn, eq->cons_index);
- break;
- }
-
- eq->cons_index++;
- aeqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
- dev_warn(dev, "cons_index overflow, set back to zero\n"
- );
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return aeqes_found;
-}
-
-static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE;
-
- return (struct hns_roce_ceqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);
-
- return (!!(roce_get_bit(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
- (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
-}
-
-static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe;
- int ceqes_found = 0;
- u32 cqn;
-
- while ((ceqe = next_ceqe_sw(eq))) {
- /* Memory barrier */
- rmb();
- cqn = roce_get_field(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
- hns_roce_cq_completion(hr_dev, cqn);
-
- ++eq->cons_index;
- ceqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
- dev_warn(&eq->hr_dev->pdev->dev,
- "cons_index overflow, set back to zero\n");
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return ceqes_found;
-}
-
-static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = &eq->hr_dev->pdev->dev;
- int eqovf_found = 0;
- u32 caepaemask_val;
- u32 cealmovf_val;
- u32 caepaest_val;
- u32 aeshift_val;
- u32 ceshift_val;
- u32 cemask_val;
- int i = 0;
-
- /**
- * AEQ overflow ECC mult bit err CEQ overflow alarm
- * must clear interrupt, mask irq, clear irq, cancel mask operation
- */
- aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
-
- if (roce_get_bit(aeshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "AEQ overflow!\n");
-
- /* Set mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- roce_set_bit(caepaest_val,
- ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
- roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
-
- /* Clear mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
- }
-
- /* CEQ almost overflow */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
- i * CEQ_REG_OFFSET);
-
- if (roce_get_bit(ceshift_val,
- ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
- eqovf_found++;
-
- /* Set mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- cealmovf_val = roce_read(hr_dev,
- ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cealmovf_val,
- ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
- 1);
- roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET, cealmovf_val);
-
- /* Clear mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
- }
- }
-
- /* ECC multi-bit error alarm */
- dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
-
- dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
-
- return eqovf_found;
-}
-
-static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- int eqes_found = 0;
-
- if (likely(eq->type_flag == HNS_ROCE_CEQ))
- /* CEQ irq routine, CEQ is pulse irq, not clear */
- eqes_found = hns_roce_ceq_int(hr_dev, eq);
- else if (likely(eq->type_flag == HNS_ROCE_AEQ))
- /* AEQ irq routine, AEQ is pulse irq, not clear */
- eqes_found = hns_roce_aeq_int(hr_dev, eq);
- else
- /* AEQ queue overflow irq */
- eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);
-
- return eqes_found;
-}
-
-static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
-{
- int int_work = 0;
- struct hns_roce_eq *eq = eq_ptr;
- struct hns_roce_dev *hr_dev = eq->hr_dev;
-
- int_work = hns_roce_eq_int(hr_dev, eq);
-
- return IRQ_RETVAL(int_work);
-}
-
-static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
- int enable_flag)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
- u32 val;
-
- val = readl(eqc);
-
- if (enable_flag)
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_VALID);
- else
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- writel(val, eqc);
-}
-
-static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t tmp_dma_addr;
- u32 eqconsindx_val = 0;
- u32 eqcuridx_val = 0;
- u32 eqshift_val = 0;
- int num_bas = 0;
- int ret;
- int i;
-
- num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
- dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
- (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
- num_bas);
- return -EINVAL;
- }
-
- eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
- if (!eq->buf_list)
- return -ENOMEM;
-
- for (i = 0; i < num_bas; ++i) {
- eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
- &tmp_dma_addr,
- GFP_KERNEL);
- if (!eq->buf_list[i].buf) {
- ret = -ENOMEM;
- goto err_out_free_pages;
- }
-
- eq->buf_list[i].map = tmp_dma_addr;
- memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
- }
- eq->cons_index = 0;
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
- eq->log_entries);
- writel(eqshift_val, eqc);
-
- /* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
-
- /*
- * Configure eq extended address 45~49 bit.
- * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
- * using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
- */
- roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
- eq->buf_list[0].map >> 44);
- roce_set_field(eqcuridx_val,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- writel(eqcuridx_val, eqc + 8);
-
- /* Configure eq consumer index */
- roce_set_field(eqconsindx_val,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- writel(eqconsindx_val, eqc + 0xc);
-
- return 0;
-
-err_out_free_pages:
- for (i = i - 1; i >= 0; i--)
- dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
- eq->buf_list[i].map);
-
- kfree(eq->buf_list);
- return ret;
-}
-
-static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- int i = 0;
- int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if (!eq->buf_list)
- return;
-
- for (i = 0; i < npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
- eq->buf_list[i].buf, eq->buf_list[i].map);
-
- kfree(eq->buf_list);
-}
-
-static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
-{
- int i = 0;
- u32 aemask_val;
- int masken = 0;
-
- /* AEQ INT */
- aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- masken);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
-
- /* CEQ INT */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- /* IRQ mask */
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, masken);
- }
-}
-
-static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
-{
- /* Configure ce int interval */
- roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_INTERVAL);
-
- /* Configure ce int burst num */
- roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
-}
-
-int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_eq *eq = NULL;
- int eq_num = 0;
- int ret = 0;
- int i = 0;
- int j = 0;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
- if (!eq_table->eq)
- return -ENOMEM;
-
- eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
- GFP_KERNEL);
- if (!eq_table->eqc_base) {
- ret = -ENOMEM;
- goto err_eqc_base_alloc_fail;
- }
-
- for (i = 0; i < eq_num; i++) {
- eq = &eq_table->eq[i];
- eq->hr_dev = hr_dev;
- eq->eqn = i;
- eq->irq = hr_dev->irq[i];
- eq->log_page_size = PAGE_SHIFT;
-
- if (i < hr_dev->caps.num_comp_vectors) {
- /* CEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_SHIFT_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->type_flag = HNS_ROCE_CEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->entries = hr_dev->caps.ceqe_depth[i];
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_ceqe);
- } else {
- /* AEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
- eq->type_flag = HNS_ROCE_AEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
- eq->entries = hr_dev->caps.aeqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_aeqe);
- }
- }
-
- /* Disable irq */
- hns_roce_int_mask_en(hr_dev);
-
- /* Configure CE irq interval and burst num */
- hns_roce_ce_int_default_cfg(hr_dev);
-
- for (i = 0; i < eq_num; i++) {
- ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
- if (ret) {
- dev_err(dev, "eq create failed\n");
- goto err_create_eq_fail;
- }
- }
-
- for (j = 0; j < eq_num; j++) {
- ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
- 0, hr_dev->irq_names[j], eq_table->eq + j);
- if (ret) {
- dev_err(dev, "request irq error!\n");
- goto err_request_irq_fail;
- }
- }
-
- for (i = 0; i < eq_num; i++)
- hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);
-
- return 0;
-
-err_request_irq_fail:
- for (j = j - 1; j >= 0; j--)
- free_irq(eq_table->eq[j].irq, eq_table->eq + j);
-
-err_create_eq_fail:
- for (i = i - 1; i >= 0; i--)
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
-
- kfree(eq_table->eqc_base);
-
-err_eqc_base_alloc_fail:
- kfree(eq_table->eq);
-
- return ret;
-}
-
-void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
-{
- int i;
- int eq_num;
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- for (i = 0; i < eq_num; i++) {
- /* Disable EQ */
- hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);
-
- free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
- }
-
- kfree(eq_table->eqc_base);
- kfree(eq_table->eq);
-}
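This file is not simply dropped: its logic reappears below as hns_roce_v1_*-prefixed code in hns_roce_hw_v1.c, since v2 hardware uses a different EQ layout and grows its own implementation. The core idiom both keep is owner-bit polling; a sketch of the validity test, derived from next_aeqe_sw() above:

	#include <linux/types.h>

	/* entries is a power of two, so (cons_index & entries) flips on
	 * every wrap of the queue; an entry is valid while its owner bit
	 * disagrees with the current wrap parity. */
	static bool eqe_is_valid(u32 owner_bit, u32 cons_index, u32 entries)
	{
		return !!owner_bit ^ !!(cons_index & entries);
	}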
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
deleted file mode 100644
index c6d212d12e03..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_EQ_H
-#define _HNS_ROCE_EQ_H
-
-#define HNS_ROCE_CEQ 1
-#define HNS_ROCE_AEQ 2
-
-#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
-#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
-#define HNS_ROCE_CEQC_REG_OFFSET 0x18
-
-#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
-#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
-
-#define HNS_ROCE_INT_MASK_DISABLE 0
-#define HNS_ROCE_INT_MASK_ENABLE 1
-
-#define EQ_ENABLE 1
-#define EQ_DISABLE 0
-#define CONS_INDEX_MASK 0xffff
-
-#define CEQ_REG_OFFSET 0x18
-
-enum {
- HNS_ROCE_EQ_STAT_INVALID = 0,
- HNS_ROCE_EQ_STAT_VALID = 2,
-};
-
-struct hns_roce_aeqe {
- u32 asyn;
- union {
- struct {
- u32 qp;
- u32 rsv0;
- u32 rsv1;
- } qp_event;
-
- struct {
- u32 cq;
- u32 rsv0;
- u32 rsv1;
- } cq_event;
-
- struct {
- u32 port;
- u32 rsv0;
- u32 rsv1;
- } port_event;
-
- struct {
- u32 ceqe;
- u32 rsv0;
- u32 rsv1;
- } ce_event;
-
- struct {
- __le64 out_param;
- __le16 token;
- u8 status;
- u8 rsv0;
- } __packed cmd;
- } event;
-};
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M \
- (((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M \
- (((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
- (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
- (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
-
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
-
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M \
- (((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)
-
-struct hns_roce_ceqe {
- union {
- int comp;
- } ceqe;
-};
-
-#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
-
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)
-
-#endif /* _HNS_ROCE_EQ_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index af27168faf0f..21ca9fa7c9d1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
@@ -774,7 +775,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
goto create_lp_qp_failed;
}
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
IB_QPS_INIT, IB_QPS_RTR);
if (ret) {
dev_err(dev, "modify qp failed(%d)!\n", ret);
@@ -1492,9 +1493,9 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
caps->num_uars = HNS_ROCE_V1_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
- caps->num_aeq_vectors = HNS_ROCE_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
+ caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
@@ -1529,10 +1530,8 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_ports + 1;
}
- for (i = 0; i < caps->num_comp_vectors; i++)
- caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;
-
- caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
+ caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
ROCEE_ACK_DELAY_REG));
caps->max_mtu = IB_MTU_2048;
@@ -2312,15 +2311,16 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immediate_data));
break;
case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
if (roce_get_bit(cqe->cqe_byte_4,
CQE_BYTE_4_IMM_INDICATOR_S)) {
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(
- cqe->immediate_data);
+ wc->ex.imm_data = cpu_to_be32(
+ le32_to_cpu(cqe->immediate_data));
} else {
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
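The imm_data hunk above fixes an endianness bug: IB immediate data is carried in network byte order and ib_wc.ex.imm_data is a __be32, while the hardware CQE field is little-endian, so the value must be decoded from LE and re-encoded as BE rather than assigned raw. A one-line sketch of the conversion (cqe_imm_to_wc is a hypothetical helper):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static __be32 cqe_imm_to_wc(__le32 cqe_imm)
	{
		/* decode the CQE's little-endian layout, then re-encode in
		 * the big-endian form consumers of ib_wc expect */
		return cpu_to_be32(le32_to_cpu(cqe_imm));
	}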
@@ -3960,6 +3960,732 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
return ret;
}
+static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+{
+ roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
+ (req_not << eq->log_entries), eq->doorbell);
+}
+
+static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SL_ERROR:
+ dev_warn(dev, "QP %d, SL error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_PORT_ERROR:
+ dev_warn(dev, "QP %d, port error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Access Violation Work Queue Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
+ dev_warn(dev, "SDB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
+ dev_warn(dev, "SDB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
+ dev_warn(dev, "ODB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
+ dev_warn(dev, "ODB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqes_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw_v1(eq))) {
+
+ /* Make sure we read the AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "PATH MIG not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "COMMUNICATION established\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "SQ DRAINED not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "PATH MIG failed\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ not support!\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
+ dev_warn(dev, "port change.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param
+ ));
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ 0x%lx overflow.\n",
+ roce_get_field(aeqe->event.ce_event.ceqe,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
+ break;
+ default:
+ dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ eq->cons_index++;
+ aeqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return aeqes_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+ int ceqes_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v1(eq))) {
+
+ /* Make sure we read the CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
+ dev_warn(&eq->hr_dev->pdev->dev,
+ "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return ceqes_found;
+}
+
+static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* CEQ irq routine, CEQ is pulse irq, not clear */
+ int_work = hns_roce_v1_ceq_int(hr_dev, eq);
+ else
+ /* AEQ irq routine, AEQ is pulse irq, not clear */
+ int_work = hns_roce_v1_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = &hr_dev->pdev->dev;
+ int int_work = 0;
+ u32 caepaemask_val;
+ u32 cealmovf_val;
+ u32 caepaest_val;
+ u32 aeshift_val;
+ u32 ceshift_val;
+ u32 cemask_val;
+ int i;
+
+ /*
+ * Abnormal interrupt:
+ * on AEQ overflow, ECC multi-bit error or CEQ overflow, the handler
+ * must mask the irq, clear the interrupt state (write 1 to clear),
+ * then unmask
+ */
+ aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+
+ /* AEQE overflow */
+ if (roce_get_bit(aeshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "AEQ overflow!\n");
+
+ /* Set mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
+ roce_set_bit(caepaest_val,
+ ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
+
+ /* Clear mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+ }
+
+ /* CEQ almost overflow */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ i * CEQ_REG_OFFSET);
+
+ if (roce_get_bit(ceshift_val,
+ ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
+ int_work++;
+
+ /* Set mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ cealmovf_val = roce_read(hr_dev,
+ ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cealmovf_val,
+ ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
+ 1);
+ roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET, cealmovf_val);
+
+ /* Clear mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+ }
+ }
+
+ /* ECC multi-bit error alarm */
+ dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
+
+ dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
+{
+ u32 aemask_val;
+ int masken = 0;
+ int i;
+
+ /* AEQ INT */
+ aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ masken);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
+
+ /* CEQ INT */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ /* IRQ mask */
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, masken);
+ }
+}
+
+static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+ int i;
+
+ if (!eq->buf_list)
+ return;
+
+ for (i = 0; i < npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
+ eq->buf_list[i].buf, eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+}
+
+static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
+ int enable_flag)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ u32 val;
+
+ val = readl(eqc);
+
+ if (enable_flag)
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_VALID);
+ else
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ writel(val, eqc);
+}
+
+static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t tmp_dma_addr;
+ u32 eqconsindx_val = 0;
+ u32 eqcuridx_val = 0;
+ u32 eqshift_val = 0;
+ int num_bas;
+ int ret;
+ int i;
+
+ num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
+ if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
+ dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
+ (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
+ num_bas);
+ return -EINVAL;
+ }
+
+ eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
+ if (!eq->buf_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bas; ++i) {
+ eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
+ &tmp_dma_addr,
+ GFP_KERNEL);
+ if (!eq->buf_list[i].buf) {
+ ret = -ENOMEM;
+ goto err_out_free_pages;
+ }
+
+ eq->buf_list[i].map = tmp_dma_addr;
+ memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
+ }
+ eq->cons_index = 0;
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
+ eq->log_entries);
+ writel(eqshift_val, eqc);
+
+ /* Configure eq extended address 12~44bit */
+ writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
+
+ /*
+ * Configure eq extended address 45~49 bit.
+ * 44 = 32 + 12: the address is shifted right by 12 because 4K
+ * pages are used, then by a further 32 to extract the high 32 bits
+ * written to hardware.
+ */
+ roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
+ eq->buf_list[0].map >> 44);
+ roce_set_field(eqcuridx_val,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ writel(eqcuridx_val, eqc + 8);
+
+ /* Configure eq consumer index */
+ roce_set_field(eqconsindx_val,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ writel(eqconsindx_val, eqc + 0xc);
+
+ return 0;
+
+err_out_free_pages:
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
+ eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+ return ret;
+}
+
+static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_eq *eq;
+ int irq_num;
+ int eq_num;
+ int ret;
+ int i, j;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
+ GFP_KERNEL);
+ if (!eq_table->eqc_base) {
+ ret = -ENOMEM;
+ goto err_eqc_base_alloc_fail;
+ }
+
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
+ eq->hr_dev = hr_dev;
+ eq->eqn = i;
+ eq->irq = hr_dev->irq[i];
+ eq->log_page_size = PAGE_SHIFT;
+
+ if (i < hr_dev->caps.num_comp_vectors) {
+ /* CEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ } else {
+ /* AEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ }
+ }
+
+ /* Disable irq */
+ hns_roce_v1_int_mask_enable(hr_dev);
+
+ /* Configure ce int interval */
+ roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_INTERVAL);
+
+ /* Configure ce int burst num */
+ roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
+
+ for (i = 0; i < eq_num; i++) {
+ ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
+ if (ret) {
+ dev_err(dev, "eq create failed\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ for (j = 0; j < irq_num; j++) {
+ if (j < eq_num)
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_eq, 0,
+ hr_dev->irq_names[j],
+ &eq_table->eq[j]);
+ else
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_abn, 0,
+ hr_dev->irq_names[j], hr_dev);
+
+ if (ret) {
+ dev_err(dev, "request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ for (i = 0; i < eq_num; i++)
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
+
+ return 0;
+
+err_request_irq_fail:
+ for (j -= 1; j >= 0; j--)
+ free_irq(hr_dev->irq[j], &eq_table->eq[j]);
+
+err_create_eq_fail:
+ for (i -= 1; i >= 0; i--)
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eqc_base);
+
+err_eqc_base_alloc_fail:
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+ for (i = 0; i < eq_num; i++) {
+ /* Disable EQ */
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
+
+ free_irq(hr_dev->irq[i], &eq_table->eq[i]);
+
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+ for (i = eq_num; i < irq_num; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ kfree(eq_table->eqc_base);
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile,
@@ -3983,6 +4709,8 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
+ .init_eq = hns_roce_v1_init_eq_table,
+ .cleanup_eq = hns_roce_v1_cleanup_eq_table,
};
static const struct of_device_id hns_roce_of_match[] = {
@@ -4060,10 +4788,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* get the mapped register base address */
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "memory resource not found!\n");
- return -EINVAL;
- }
hr_dev->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
@@ -4132,14 +4856,14 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* read the interrupt names from the DT or ACPI */
ret = device_property_read_string_array(dev, "interrupt-names",
hr_dev->irq_names,
- HNS_ROCE_MAX_IRQ_NUM);
+ HNS_ROCE_V1_MAX_IRQ_NUM);
if (ret < 0) {
dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
return ret;
}
/* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
if (hr_dev->irq[i] <= 0) {
dev_err(dev, "platform get of irq[=%d] failed!\n", i);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 21a07ef0afc9..b44ddd239060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -60,8 +60,13 @@
#define HNS_ROCE_V1_GID_NUM 16
#define HNS_ROCE_V1_RESV_QP 8
-#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
-#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
+#define HNS_ROCE_V1_MAX_IRQ_NUM 34
+#define HNS_ROCE_V1_COMP_VEC_NUM 32
+#define HNS_ROCE_V1_AEQE_VEC_NUM 1
+#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1
+
+#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
+#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
#define HNS_ROCE_V1_QPC_ENTRY_SIZE 256
#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
@@ -159,6 +164,41 @@
#define SDB_INV_CNT_OFFSET 8
#define SDB_ST_CMP_VAL 8
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
+
+#define HNS_ROCE_INT_MASK_DISABLE 0
+#define HNS_ROCE_INT_MASK_ENABLE 1
+
+#define CEQ_REG_OFFSET 0x18
+
+#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
+
+#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0)
+
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24)
+
+#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0)
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25)
+
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0)
+
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
+
struct hns_roce_cq_context {
u32 cqc_byte_4;
u32 cq_bt_l;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8e18445714a9..256fe110107a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <net/addrconf.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
@@ -51,32 +52,106 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
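+/* Fill the data portion of an RC send WQE: copy the payload inline when
+ * IB_SEND_INLINE is set, otherwise build scatter/gather entries, spilling
+ * SGEs beyond the first two into the extended SGE area.
+ */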
+static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ void *wqe, unsigned int *sge_ind,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ int i;
+
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (rc_sq_wqe->msg_len > hr_dev->caps.max_sq_inline) {
+ *bad_wr = wr;
+ dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
+ hr_dev->caps.max_sq_inline, rc_sq_wqe->msg_len);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+ 1);
+ } else {
+ if (wr->num_sge <= 2) {
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+ } else {
+ roce_set_field(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < 2; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+
+ dseg = get_send_extend_sge(qp,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < wr->num_sge - 2; i++) {
+ if (likely(wr->sg_list[i + 2].length)) {
+ set_data_seg_v2(dseg,
+ wr->sg_list + 2 + i);
+ dseg++;
+ (*sge_ind)++;
+ }
+ }
+ }
+
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
unsigned int sge_ind = 0;
- unsigned int wqe_sz = 0;
unsigned int owner_bit;
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
+ bool loopback;
int ret = 0;
+ u8 *smac;
int nreq;
int i;
- if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+ if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+ ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_UD)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
*bad_wr = NULL;
return -EOPNOTSUPP;
}
- if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+ if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
+ qp->state == IB_QPS_RTR)) {
dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
*bad_wr = wr;
return -EINVAL;
@@ -106,161 +181,255 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->wr_id;
owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
- rc_sq_wqe = wqe;
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
- for (i = 0; i < wr->num_sge; i++)
- rc_sq_wqe->msg_len += wr->sg_list[i].length;
- rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+ /* Process the WQE differently according to the QP type */
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ ud_sq_wqe = wqe;
+ memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
+ V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
+ V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
+ V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
+ V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
+ ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
+ ah->av.mac[5]);
+
+ /* MAC loopback */
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac,
+ smac) ? 1 : 0;
+
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
+
+ roce_set_field(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
- (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+ for (i = 0; i < wr->num_sge; i++)
+ ud_sq_wqe->msg_len += wr->sg_list[i].length;
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+ ud_sq_wqe->immtdata = send_ieth(wr);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ /* Set sig attr */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
- owner_bit);
+ /* Set se attr */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_READ);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_PD_M,
+ V2_UD_SEND_WQE_BYTE_16_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+
+ roce_set_field(ud_sq_wqe->byte_20,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ roce_set_field(ud_sq_wqe->byte_24,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
+ ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey &
+ 0x80000000 ? qp->qkey :
+ ud_wr(wr)->remote_qkey);
+ roce_set_field(ud_sq_wqe->byte_32,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_S,
+ ud_wr(wr)->remote_qpn);
+
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S,
+ ah->av.vlan);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
+ ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
+ 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_SL_M,
+ V2_UD_SEND_WQE_BYTE_40_SL_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_SL_SHIFT);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_M,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_S,
+ qp->port);
+
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
+ hns_get_gid_index(hr_dev, qp->phy_port,
+ ah->av.gid_index));
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
+ GID_LEN_V2);
+
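+ /* UD WQEs carry all of their SGEs in the extended SGE area */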
+ dseg = get_send_extend_sge(qp,
+ sge_ind & (qp->sge.sge_cnt - 1));
+ for (i = 0; i < wr->num_sge; i++) {
+ set_data_seg_v2(dseg + i, wr->sg_list + i);
+ sge_ind++;
+ }
+
+ ind++;
+ } else if (ibqp->qp_type == IB_QPT_RC) {
+ rc_sq_wqe = wqe;
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+ for (i = 0; i < wr->num_sge; i++)
+ rc_sq_wqe->msg_len += wr->sg_list[i].length;
+
+ rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_SEND:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
- break;
- case IB_WR_SEND_WITH_INV:
- roce_set_field(rc_sq_wqe->byte_4,
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
- break;
- case IB_WR_SEND_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
- break;
- case IB_WR_LOCAL_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_LOCAL_INV);
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
- break;
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
- break;
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
- break;
- case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
- break;
- default:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_MASK);
- break;
- }
-
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- dseg = wqe;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
- if (rc_sq_wqe->msg_len >
- hr_dev->caps.max_sq_inline) {
- ret = -EINVAL;
- *bad_wr = wr;
- dev_err(dev, "inline len(1-%d)=%d, illegal",
- rc_sq_wqe->msg_len,
- hr_dev->caps.max_sq_inline);
- goto out;
+ break;
+ default:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_MASK);
+ break;
}
- for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *)wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
- wqe_sz += wr->sg_list[i].length;
- }
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ dseg = wqe;
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+ ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
+ &sge_ind, bad_wr);
+ if (ret)
+ goto out;
+ ind++;
} else {
- if (wr->num_sge <= 2) {
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
- } else {
- roce_set_field(rc_sq_wqe->byte_20,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < 2; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
-
- dseg = get_send_extend_sge(qp,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < wr->num_sge - 2; i++) {
- set_data_seg_v2(dseg + i,
- wr->sg_list + 2 + i);
- sge_ind++;
- }
- }
-
- roce_set_field(rc_sq_wqe->byte_16,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
- wr->num_sge);
- wqe_sz += wr->num_sge *
- sizeof(struct hns_roce_v2_wqe_data_seg);
+ dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+ return -EOPNOTSUPP;
}
- ind++;
}
out:
@@ -299,6 +468,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db rq_db;
unsigned long flags;
@@ -347,6 +517,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dseg[i].addr = 0;
}
+ /* The RQ supports inline data; record the sg list for copy-out at
+ * completion time.
+ */
+ sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
+
hr_qp->rq.wrid[ind] = wr->wr_id;
ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
@@ -908,9 +1086,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
- caps->num_aeq_vectors = 1;
- caps->num_comp_vectors = 63;
- caps->num_other_vectors = 0;
+ caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
@@ -955,12 +1133,18 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->cqe_ba_pg_sz = 0;
caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
- HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+ HNS_ROCE_CAP_FLAG_RQ_INLINE;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
@@ -1382,6 +1566,8 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
+ V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
@@ -1422,6 +1608,15 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M,
+ V2_CQC_BYTE_56_CQ_PERIOD_S,
+ HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
@@ -1457,6 +1652,40 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
return 0;
}
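+/* For an RQ-inline completion, copy the payload out of the receive WQE
+ * buffer into the user scatter list recorded at post_recv time.
+ */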
+static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
+ struct hns_roce_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct hns_roce_rinl_sge *sge_list;
+ u32 wr_num, wr_cnt, sge_num;
+ u32 sge_cnt, data_len, size;
+ void *wqe_buf;
+
+ wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
+ wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+
+ sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+ sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+ wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
+ data_len = wc->byte_len;
+
+ for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
+ size = min(sge_list[sge_cnt].len, data_len);
+ memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
+
+ data_len -= size;
+ wqe_buf += size;
+ }
+
+ if (data_len) {
+ wc->status = IB_WC_LOC_LEN_ERR;
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -1469,6 +1698,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
u32 opcode;
u32 status;
int qpn;
+ int ret;
/* Find cqe according to consumer index */
cqe = next_cqe_sw_v2(hr_cq);
@@ -1636,7 +1866,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
@@ -1645,18 +1875,29 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = cqe->rkey_immtdata;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
+ if ((wc->qp->qp_type == IB_QPT_RC ||
+ wc->qp->qp_type == IB_QPT_UC) &&
+ (opcode == HNS_ROCE_V2_OPCODE_SEND ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+ (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
+ ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
+ if (ret)
+ return -EAGAIN;
+ }
+
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
@@ -1670,6 +1911,21 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
+ wc->port_num = roce_get_field(cqe->byte_32,
+ V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
+ wc->pkey_index = 0;
+ memcpy(wc->smac, cqe->smac, 4);
+ wc->smac[4] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_4_M,
+ V2_CQE_BYTE_28_SMAC_4_S);
+ wc->smac[5] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_5_M,
+ V2_CQE_BYTE_28_SMAC_5_S);
+ wc->vlan_id = 0xffff;
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+ wc->network_hdr_type = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_PORT_TYPE_M,
+ V2_CQE_BYTE_28_PORT_TYPE_S);
}
return 0;
@@ -1859,8 +2115,39 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
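+/* Program the remote access bits (RRE/RWE/ATE) of the QPC, taking each
+ * value from the new attributes when the corresponding mask bit is set
+ * and from the QP's cached state otherwise.
+ */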
+static void set_access_flags(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ const struct ib_qp_attr *attr, int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
+ dest_rd_atomic = !!(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
+ attr->max_dest_rd_atomic : hr_qp->resp_depth;
+
+ access_flags = !!(attr_mask & IB_QP_ACCESS_FLAGS) ?
+ attr->qp_access_flags : hr_qp->atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+}
+
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
+ int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -1877,9 +2164,18 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -1944,18 +2240,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ hr_qp->qkey = attr->qkey;
+ }
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -2176,9 +2467,17 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -2239,7 +2538,7 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -2255,10 +2554,10 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
}
- if (attr_mask & IB_QP_PKEY_INDEX)
- context->qkey_xrcd = attr->pkey_index;
- else
- context->qkey_xrcd = hr_qp->pkey_index;
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ }
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
@@ -2354,7 +2653,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
+ hr_dev->caps.mtt_hop_num : 0);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -2463,11 +2763,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S, 0);
+ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+ attr->max_dest_rd_atomic) {
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ fls(attr->max_dest_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+ }
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
@@ -2511,8 +2814,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
+ else
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
@@ -2557,12 +2865,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S, 0);
-
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
@@ -2625,13 +2927,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- context->sq_cur_sge_blk_addr = hr_qp->sq.max_gs > 2 ?
+ context->sq_cur_sge_blk_addr =
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
((u32)(mtts[hr_qp->sge.offset / page_size]
>> PAGE_ADDR_SHIFT)) : 0;
roce_set_field(context->byte_184_irrl_idx,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- hr_qp->sq.max_gs > 2 ?
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
(mtts[hr_qp->sge.offset / page_size] >>
(32 + PAGE_ADDR_SHIFT)) : 0);
qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -2766,6 +3069,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ fls(attr->max_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+ }
return 0;
}
@@ -2794,7 +3105,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
+ qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -2829,6 +3141,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
}
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
/* Every status migrate must change state */
roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
@@ -2845,6 +3160,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ hr_qp->atomic_rd_en = attr->qp_access_flags;
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
@@ -3098,6 +3416,11 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+ }
+
return 0;
}
@@ -3162,6 +3485,1146 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
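+/* Report the updated consumer index to hardware through the EQ doorbell
+ * and re-arm the EQ according to its arming state.
+ */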
+static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+{
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+ doorbell[1] = 0;
+
+ if (eq->type_flag == HNS_ROCE_AEQ) {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_AEQ :
+ HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
+ } else {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
+ HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_CEQ :
+ HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
+ }
+
+ roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
+ HNS_ROCE_V2_EQ_DB_PARA_S,
+ (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+
+ hns_roce_write64_k(doorbell, eq->doorbell);
+}
+
+static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local work queue catastrophic error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local access violation work queue error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "Communication established.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "Send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid request local work queue error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 cqn;
+
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
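+/* Locate an AEQE under multi-hop addressing: with hop_num 0 all entries
+ * live in the single L0 block, otherwise the offset selects one of the
+ * per-chunk buffers.
+ */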
+static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe;
+
+ if (!eq->hop_num)
+ aeqe = get_aeqe_v2(eq, eq->cons_index);
+ else
+ aeqe = mhop_get_aeqe(eq, eq->cons_index);
+
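+ /* The AEQE belongs to software when its owner bit differs from the
+ * wrap parity of cons_index; the bit alternates on each pass around
+ * the ring.
+ */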
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqe_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw_v2(eq))) {
+
+ /* Make sure we read AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "Path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "Path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ dev_warn(dev, "SRQ not support.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ dev_warn(dev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ dev_warn(dev, "Function level reset.\n");
+ break;
+ default:
+ dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ ++eq->cons_index;
+ aeqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+ return aeqe_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
+ buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+
+ if (!eq->hop_num)
+ ceqe = get_ceqe_v2(eq, eq->cons_index);
+ else
+ ceqe = mhop_get_ceqe(eq, eq->cons_index);
+
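+ /* Same ownership scheme as the AEQ: the CEQE is valid when its
+ * owner bit differs from the wrap parity of cons_index.
+ */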
+ return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_ceqe *ceqe;
+ int ceqe_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v2(eq))) {
+
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_V2_CEQE_COMP_CQN_M,
+ HNS_ROCE_V2_CEQE_COMP_CQN_S);
+
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+
+ return ceqe_found;
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* Completion event interrupt */
+ int_work = hns_roce_v2_ceq_int(hr_dev, eq);
+ else
+ /* Asynchronous event interrupt */
+ int_work = hns_roce_v2_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
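+/* Handle the abnormal interrupt: acknowledge AEQ overflow, bus error or
+ * other error by writing the latched status bit back and re-asserting
+ * the abnormal interrupt enable.
+ */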
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = hr_dev->dev;
+ int int_work = 0;
+ u32 int_st;
+ u32 int_en;
+
+ /* Abnormal interrupt */
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+ if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+ dev_err(dev, "AEQ overflow!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
+ dev_err(dev, "BUS ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
+ dev_err(dev, "OTHER ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else {
+ dev_err(dev, "No abnormal irq found!\n");
+ }
+
+ return IRQ_RETVAL(int_work);
+}
+
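+/* Enable or mask the per-EQ event interrupts and the abnormal interrupt
+ * for this function according to enable_flag.
+ */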
+static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+ int eq_num, int enable_flag)
+{
+ int i;
+
+ if (enable_flag == EQ_ENABLE) {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M);
+ } else {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
+ }
+}
+
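+/* Destroy an EQ context via mailbox command; EQNs below the number of
+ * completion vectors are CEQs, the remainder AEQs.
+ */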
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (eqn < hr_dev->caps.num_comp_vectors)
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_CEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ else
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_AEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret)
+ dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+}
+
+static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ u64 idx;
+ u64 size;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ int eqe_alloc;
+ int ba_num;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
+ buf_chk_sz;
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ dma_free_coherent(dev, (unsigned int)(eq->entries *
+ eq->eqe_size), eq->bt_l0, eq->l0_dma);
+ return;
+ }
+
+ /* hop_num = 1 or hop = 2 */
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ if (mhop_num == 1) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ if (i == eq->l0_last_num - 1) {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ dma_free_coherent(dev, size, eq->buf[i],
+ eq->buf_dma[i]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ }
+ } else if (mhop_num == 2) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * (bt_chk_sz / 8) + j;
+ if ((i == eq->l0_last_num - 1)
+ && j == eq->l1_last_num - 1) {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ dma_free_coherent(dev, size,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+ kfree(eq->buf_dma);
+ kfree(eq->buf);
+ kfree(eq->l1_dma);
+ kfree(eq->bt_l1);
+ eq->buf_dma = NULL;
+ eq->buf = NULL;
+ eq->l1_dma = NULL;
+ eq->bt_l1 = NULL;
+}
+
+static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ u32 buf_chk_sz;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ if (hr_dev->caps.eqe_hop_num) {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ return;
+ }
+
+ if (eq->buf_list)
+ dma_free_coherent(hr_dev->dev, buf_chk_sz,
+ eq->buf_list->buf, eq->buf_list->map);
+}
+
+static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ void *mb_buf)
+{
+ struct hns_roce_eq_context *eqc;
+
+ eqc = mb_buf;
+ memset(eqc, 0, sizeof(struct hns_roce_eq_context));
+
+ /* init eqc */
+ eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
+ eq->cons_index = 0;
+ eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+ eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+ eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+ eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
+ eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
+ eq->shift = ilog2((unsigned int)eq->entries);
+
+ if (!eq->hop_num)
+ eq->eqe_ba = eq->buf_list->map;
+ else
+ eq->eqe_ba = eq->l0_dma;
+
+ /* set eqc state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQ_ST_M,
+ HNS_ROCE_EQC_EQ_ST_S,
+ HNS_ROCE_V2_EQ_STATE_VALID);
+
+ /* set eqe hop num */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_HOP_NUM_M,
+ HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
+
+ /* set eqc over_ignore */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_OVER_IGNORE_M,
+ HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
+
+ /* set eqc coalesce */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_COALESCE_M,
+ HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
+
+ /* set eqc arm_state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_ARM_ST_M,
+ HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
+
+ /* set eqn */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQN_M,
+ HNS_ROCE_EQC_EQN_S, eq->eqn);
+
+ /* set eqe_cnt */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQE_CNT_M,
+ HNS_ROCE_EQC_EQE_CNT_S,
+ HNS_ROCE_EQ_INIT_EQE_CNT);
+
+ /* set eqe_ba_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BA_PG_SZ_M,
+ HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);
+
+ /* set eqe_buf_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BUF_PG_SZ_M,
+ HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);
+
+ /* set eq_producer_idx */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_PROD_INDX_M,
+ HNS_ROCE_EQC_PROD_INDX_S,
+ HNS_ROCE_EQ_INIT_PROD_IDX);
+
+ /* set eq_max_cnt */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_MAX_CNT_M,
+ HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
+
+ /* set eq_period */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_PERIOD_M,
+ HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
+
+ /* set eqe_report_timer */
+ roce_set_field(eqc->eqe_report_timer,
+ HNS_ROCE_EQC_REPORT_TIMER_M,
+ HNS_ROCE_EQC_REPORT_TIMER_S,
+ HNS_ROCE_EQ_INIT_REPORT_TIMER);
+
+ /* set eqe_ba [34:3] */
+ roce_set_field(eqc->eqe_ba0,
+ HNS_ROCE_EQC_EQE_BA_L_M,
+ HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
+
+ /* set eqe_ba [64:35] */
+ roce_set_field(eqc->eqe_ba1,
+ HNS_ROCE_EQC_EQE_BA_H_M,
+ HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
+
+ /* set eq shift */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_SHIFT_M,
+ HNS_ROCE_EQC_SHIFT_S, eq->shift);
+
+ /* set eq MSI_IDX */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_MSI_INDX_M,
+ HNS_ROCE_EQC_MSI_INDX_S,
+ HNS_ROCE_EQ_INIT_MSI_IDX);
+
+ /* set cur_eqe_ba [27:12] */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
+
+ /* set cur_eqe_ba [59:28] */
+ roce_set_field(eqc->byte_32,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
+
+ /* set cur_eqe_ba [63:60] */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
+
+ /* set eq consumer idx */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CONS_INDX_M,
+ HNS_ROCE_EQC_CONS_INDX_S,
+ HNS_ROCE_EQ_INIT_CONS_IDX);
+
+ /* set nex_eqe_ba[43:12] */
+ roce_set_field(eqc->nxt_eqe_ba0,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
+
+ /* set nex_eqe_ba[63:44] */
+ roce_set_field(eqc->nxt_eqe_ba1,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
+}
+
+static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ int eq_alloc_done = 0;
+ int eq_buf_cnt = 0;
+ int eqe_alloc;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ u64 size;
+ u64 idx;
+ int ba_num;
+ int bt_num;
+ int record_i;
+ int record_j;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+
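+ /* ba_num: buffer chunks needed for all EQEs; bt_num: base address
+ * table chunks needed to reference those buffers (8 bytes per BA).
+ */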
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
+ / buf_chk_sz;
+ bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ if (eq->entries > buf_chk_sz / eq->eqe_size) {
+ dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
+ eq->entries);
+ return -EINVAL;
+ }
+ eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
+ &(eq->l0_dma), GFP_KERNEL);
+ if (!eq->bt_l0)
+ return -ENOMEM;
+
+ eq->cur_eqe_ba = eq->l0_dma;
+ eq->nxt_eqe_ba = 0;
+
+ memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
+
+ return 0;
+ }
+
+ eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
+ if (!eq->buf_dma)
+ return -ENOMEM;
+ eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
+ if (!eq->buf)
+ goto err_kcalloc_buf;
+
+ if (mhop_num == 2) {
+ eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
+ if (!eq->l1_dma)
+ goto err_kcalloc_l1_dma;
+
+ eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
+ if (!eq->bt_l1)
+ goto err_kcalloc_bt_l1;
+ }
+
+ /* alloc L0 BT */
+ eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
+ if (!eq->bt_l0)
+ goto err_dma_alloc_l0;
+
+ if (mhop_num == 1) {
+ if (ba_num > (bt_chk_sz / 8))
+ dev_err(dev, "ba_num %d is too large for 1 hop\n",
+ ba_num);
+
+ /* alloc buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ }
+ eq->buf[i] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[i]),
+ GFP_KERNEL);
+ if (!eq->buf[i])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[i], 0, size);
+ *(eq->bt_l0 + i) = eq->buf_dma[i];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+
+ } else if (mhop_num == 2) {
+ /* alloc L1 BT and buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
+ &(eq->l1_dma[i]),
+ GFP_KERNEL);
+ if (!eq->bt_l1[i])
+ goto err_dma_alloc_l1;
+ *(eq->bt_l0 + i) = eq->l1_dma[i];
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ }
+ eq->buf[idx] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[idx]),
+ GFP_KERNEL);
+ if (!eq->buf[idx])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[idx], 0, size);
+ *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num) {
+ eq_alloc_done = 1;
+ break;
+ }
+ }
+
+ if (eq_alloc_done)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+ }
+
+ eq->l0_last_num = i + 1;
+ if (mhop_num == 2)
+ eq->l1_last_num = j + 1;
+
+ return 0;
+
+err_dma_alloc_l1:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+ for (i -= 1; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ goto err_dma_alloc_l0;
+
+err_dma_alloc_buf:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+
+ if (mhop_num == 1)
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ else if (mhop_num == 2) {
+ record_i = i;
+ record_j = j;
+ for (; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ if (i == record_i && j >= record_j)
+ break;
+
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+
+err_dma_alloc_l0:
+ kfree(eq->bt_l1);
+ eq->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(eq->l1_dma);
+ eq->l1_dma = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(eq->buf);
+ eq->buf = NULL;
+
+err_kcalloc_buf:
+ kfree(eq->buf_dma);
+ eq->buf_dma = NULL;
+
+ return -ENOMEM;
+}
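Aside: the mhop_num == 2 path above maps a flat buffer index onto two levels of base-address tables (BTs) with idx = i * (bt_chk_sz / 8) + j, since each BT chunk of bt_chk_sz bytes holds bt_chk_sz / 8 eight-byte DMA addresses. A minimal standalone sketch of that addressing (illustrative only, not driver code; all sizes are assumed):

/*
 * Two-level BT addressing sketch: L0 slot i points at an L1 table,
 * L1 slot j points at buffer chunk idx. Standalone illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bt_chk_sz = 4096;             /* one BT page (assumed) */
	unsigned int entries_per_bt = bt_chk_sz / 8; /* 8-byte DMA addresses */
	unsigned int i = 1, j = 5;                 /* L0 slot, L1 slot */

	/* flat buffer index, as computed in the mhop_num == 2 path */
	unsigned int idx = i * entries_per_bt + j;

	printf("L0[%u] -> L1[%u] -> buf[%u]\n", i, j, idx);
	return 0;
}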
+
+static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ unsigned int eq_cmd)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u32 buf_chk_sz = 0;
+ int ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (!hr_dev->caps.eqe_hop_num) {
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+
+ eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
+ GFP_KERNEL);
+ if (!eq->buf_list) {
+ ret = -ENOMEM;
+ goto free_cmd_mbox;
+ }
+
+ eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
+ &(eq->buf_list->map),
+ GFP_KERNEL);
+ if (!eq->buf_list->buf) {
+ ret = -ENOMEM;
+ goto err_alloc_buf;
+ }
+
+ memset(eq->buf_list->buf, 0, buf_chk_sz);
+ } else {
+ ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
+ if (ret)
+ goto free_cmd_mbox;
+ }
+
+ hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
+ eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
+ goto err_cmd_mbox;
+ }
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_cmd_mbox:
+ if (!hr_dev->caps.eqe_hop_num)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
+ eq->buf_list->map);
+ else {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ goto free_cmd_mbox;
+ }
+
+err_alloc_buf:
+ kfree(eq->buf_list);
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_eq *eq;
+ unsigned int eq_cmd;
+ int irq_num;
+ int eq_num;
+ int other_num;
+ int comp_num;
+ int aeq_num;
+ int i, j, k;
+ int ret;
+
+ other_num = hr_dev->caps.num_other_vectors;
+ comp_num = hr_dev->caps.num_comp_vectors;
+ aeq_num = hr_dev->caps.num_aeq_vectors;
+
+ eq_num = comp_num + aeq_num;
+ irq_num = eq_num + other_num;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_num; i++) {
+ hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+ GFP_KERNEL);
+ if (!hr_dev->irq_names[i]) {
+ ret = -ENOMEM;
+ goto err_failed_kzalloc;
+ }
+ }
+
+ /* create eq */
+ for (j = 0; j < eq_num; j++) {
+ eq = &eq_table->eq[j];
+ eq->hr_dev = hr_dev;
+ eq->eqn = j;
+ if (j < comp_num) {
+ /* CEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j + other_num + aeq_num];
+ eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
+ } else {
+ /* AEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j - comp_num + other_num];
+ eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
+ }
+
+ ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
+ if (ret) {
+ dev_err(dev, "eq create failed.\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ /* enable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
+
+ /* irq contains: abnormal + AEQ + CEQ */
+ for (k = 0; k < irq_num; k++)
+ if (k < other_num)
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
+ else if (k < (other_num + aeq_num))
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
+ k - other_num);
+ else
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
+ k - other_num - aeq_num);
+
+ for (k = 0; k < irq_num; k++) {
+ if (k < other_num)
+ ret = request_irq(hr_dev->irq[k],
+ hns_roce_v2_msix_interrupt_abn,
+ 0, hr_dev->irq_names[k], hr_dev);
+
+ else if (k < (other_num + comp_num))
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k + aeq_num],
+ &eq_table->eq[k - other_num]);
+ else
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k - comp_num],
+ &eq_table->eq[k - other_num]);
+ if (ret) {
+ dev_err(dev, "Request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ return 0;
+
+err_request_irq_fail:
+ for (k -= 1; k >= 0; k--)
+ if (k < other_num)
+ free_irq(hr_dev->irq[k], hr_dev);
+ else
+ free_irq(eq_table->eq[k - other_num].irq,
+ &eq_table->eq[k - other_num]);
+
+err_create_eq_fail:
+ for (j -= 1; j >= 0; j--)
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
+
+err_failed_kzalloc:
+ for (i -= 1; i >= 0; i--)
+ kfree(hr_dev->irq_names[i]);
+ kfree(eq_table->eq);
+
+ return ret;
+}
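Aside: the index arithmetic in the snprintf() and request_irq() loops above follows from two different layouts: the irq vector array is ordered [abnormal | AEQ | CEQ], while eq_table->eq is ordered [CEQ | AEQ]. A standalone sketch of the eq-to-irq mapping (the vector counts below are made up for illustration):

/*
 * Map an eq index (CEQs first, then AEQs) to its irq vector index
 * (abnormal vectors first, then AEQ, then CEQ). Illustrative only.
 */
#include <stdio.h>

static int eq_to_irq(int eqn, int other_num, int comp_num, int aeq_num)
{
	if (eqn < comp_num)                       /* CEQ */
		return other_num + aeq_num + eqn;
	return other_num + (eqn - comp_num);      /* AEQ */
}

int main(void)
{
	int other = 1, comp = 2, aeq = 1;         /* assumed vector counts */
	int eqn;

	for (eqn = 0; eqn < comp + aeq; eqn++)
		printf("eq %d -> irq vector %d\n", eqn,
		       eq_to_irq(eqn, other, comp, aeq));
	return 0;
}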
+
+static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ /* Disable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
+
+ for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ for (i = 0; i < eq_num; i++) {
+ hns_roce_v2_destroy_eqc(hr_dev, i);
+
+ free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
+
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+
+ for (i = 0; i < irq_num; i++)
+ kfree(hr_dev->irq_names[i]);
+
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
@@ -3183,6 +4646,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.post_recv = hns_roce_v2_post_recv,
.req_notify_cq = hns_roce_v2_req_notify_cq,
.poll_cq = hns_roce_v2_poll_cq,
+ .init_eq = hns_roce_v2_init_eq_table,
+ .cleanup_eq = hns_roce_v2_cleanup_eq_table,
};
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
@@ -3197,6 +4662,7 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct pci_device_id *id;
+ int i;
id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
if (!id) {
@@ -3214,8 +4680,15 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
+ addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
+ hr_dev->iboe.netdevs[0]->dev_addr);
+
+ for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+ hr_dev->irq[i] = pci_irq_vector(handle->pdev,
+ i + handle->rinfo.base_vector);
+
/* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 0;
+ hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 04b7a51b8efb..960df095392a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -53,6 +53,10 @@
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
+#define HNS_ROCE_V2_MAX_IRQ_NUM 65
+#define HNS_ROCE_V2_COMP_VEC_NUM 63
+#define HNS_ROCE_V2_AEQE_VEC_NUM 1
+#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
@@ -78,6 +82,8 @@
#define HNS_ROCE_MTT_HOP_NUM 1
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
+#define HNS_ROCE_EQE_HOP_NUM 2
+
#define HNS_ROCE_V2_GID_INDEX_NUM 256
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -105,6 +111,12 @@
(step_idx == 1 && hop_num == 1) || \
(step_idx == 2 && hop_num == 2))
+enum {
+ NO_ARMED = 0x0,
+ REG_NXT_CEQE = 0x2,
+ REG_NXT_SE_CEQE = 0x3
+};
+
#define V2_CQ_DB_REQ_NOT_SOL 0
#define V2_CQ_DB_REQ_NOT 1
@@ -229,6 +241,9 @@ struct hns_roce_v2_cq_context {
u32 cqe_report_timer;
u32 byte_64_se_cqe_idx;
};
+#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
+
#define V2_CQC_BYTE_4_CQ_ST_S 0
#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
@@ -747,11 +762,14 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_cqe {
u32 byte_4;
- u32 rkey_immtdata;
+ union {
+ __le32 rkey;
+ __be32 immtdata;
+ };
u32 byte_12;
u32 byte_16;
u32 byte_cnt;
- u32 smac;
+ u8 smac[4];
u32 byte_28;
u32 byte_32;
};
@@ -901,6 +919,90 @@ struct hns_roce_v2_cq_db {
#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+struct hns_roce_v2_ud_send_wqe {
+ u32 byte_4;
+ u32 msg_len;
+ u32 immtdata;
+ u32 byte_16;
+ u32 byte_20;
+ u32 byte_24;
+ u32 qkey;
+ u32 byte_32;
+ u32 byte_36;
+ u32 byte_40;
+ u32 dmac;
+ u32 byte_48;
+ u8 dgid[GID_LEN_V2];
+};
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7
+
+#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8
+
+#define V2_UD_SEND_WQE_BYTE_4_SE_S 11
+
+#define V2_UD_SEND_WQE_BYTE_16_PD_S 0
+#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16)
+
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0)
+
+#define V2_UD_SEND_WQE_BYTE_40_SL_S 20
+#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20)
+
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
+
+#define V2_UD_SEND_WQE_DMAC_0_S 0
+#define V2_UD_SEND_WQE_DMAC_0_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_DMAC_1_S 8
+#define V2_UD_SEND_WQE_DMAC_1_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_DMAC_2_S 16
+#define V2_UD_SEND_WQE_DMAC_2_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_DMAC_3_S 24
+#define V2_UD_SEND_WQE_DMAC_3_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_S 0
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_S 8
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S 16
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_S 24
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24)
+
struct hns_roce_v2_rc_send_wqe {
u32 byte_4;
u32 msg_len;
@@ -1129,9 +1231,6 @@ struct hns_roce_cmq_desc {
u32 data[6];
};
-#define ROCEE_VF_MB_CFG0_REG 0x40
-#define ROCEE_VF_MB_STATUS_REG 0x58
-
#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
@@ -1174,4 +1273,178 @@ struct hns_roce_v2_priv {
struct hns_roce_v2_cmq cmq;
};
+struct hns_roce_eq_context {
+ u32 byte_4;
+ u32 byte_8;
+ u32 byte_12;
+ u32 eqe_report_timer;
+ u32 eqe_ba0;
+ u32 eqe_ba1;
+ u32 byte_28;
+ u32 byte_32;
+ u32 byte_36;
+ u32 nxt_eqe_ba0;
+ u32 nxt_eqe_ba1;
+ u32 rsv[5];
+};
+
+#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x0
+
+#define HNS_ROCE_V2_EQ_STATE_INVALID 0
+#define HNS_ROCE_V2_EQ_STATE_VALID 1
+#define HNS_ROCE_V2_EQ_STATE_OVERFLOW 2
+#define HNS_ROCE_V2_EQ_STATE_FAILURE 3
+
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_0 0
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_1 1
+
+#define HNS_ROCE_V2_EQ_COALESCE_0 0
+#define HNS_ROCE_V2_EQ_COALESCE_1 1
+
+#define HNS_ROCE_V2_EQ_FIRED 0
+#define HNS_ROCE_V2_EQ_ARMED 1
+#define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3
+
+#define HNS_ROCE_EQ_INIT_EQE_CNT 0
+#define HNS_ROCE_EQ_INIT_PROD_IDX 0
+#define HNS_ROCE_EQ_INIT_REPORT_TIMER 0
+#define HNS_ROCE_EQ_INIT_MSI_IDX 0
+#define HNS_ROCE_EQ_INIT_CONS_IDX 0
+#define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0
+
+#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31
+#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31
+
+#define HNS_ROCE_V2_COMP_EQE_NUM 0x1000
+#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
+
+#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
+#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
+#define HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S 2
+
+#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
+#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
+#define HNS_ROCE_EQ_DB_CMD_CEQ 0x2
+#define HNS_ROCE_EQ_DB_CMD_CEQ_ARMED 0x3
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+
+#define EQ_REG_OFFSET 0x4
+
+#define HNS_ROCE_INT_NAME_LEN 32
+#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_CONS_IDX_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
+#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
+
+/* WORD0 */
+#define HNS_ROCE_EQC_EQ_ST_S 0
+#define HNS_ROCE_EQC_EQ_ST_M GENMASK(1, 0)
+
+#define HNS_ROCE_EQC_HOP_NUM_S 2
+#define HNS_ROCE_EQC_HOP_NUM_M GENMASK(3, 2)
+
+#define HNS_ROCE_EQC_OVER_IGNORE_S 4
+#define HNS_ROCE_EQC_OVER_IGNORE_M GENMASK(4, 4)
+
+#define HNS_ROCE_EQC_COALESCE_S 5
+#define HNS_ROCE_EQC_COALESCE_M GENMASK(5, 5)
+
+#define HNS_ROCE_EQC_ARM_ST_S 6
+#define HNS_ROCE_EQC_ARM_ST_M GENMASK(7, 6)
+
+#define HNS_ROCE_EQC_EQN_S 8
+#define HNS_ROCE_EQC_EQN_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_EQE_CNT_S 16
+#define HNS_ROCE_EQC_EQE_CNT_M GENMASK(31, 16)
+
+/* WORD1 */
+#define HNS_ROCE_EQC_BA_PG_SZ_S 0
+#define HNS_ROCE_EQC_BA_PG_SZ_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_BUF_PG_SZ_S 4
+#define HNS_ROCE_EQC_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define HNS_ROCE_EQC_PROD_INDX_S 8
+#define HNS_ROCE_EQC_PROD_INDX_M GENMASK(31, 8)
+
+/* WORD2 */
+#define HNS_ROCE_EQC_MAX_CNT_S 0
+#define HNS_ROCE_EQC_MAX_CNT_M GENMASK(15, 0)
+
+#define HNS_ROCE_EQC_PERIOD_S 16
+#define HNS_ROCE_EQC_PERIOD_M GENMASK(31, 16)
+
+/* WORD3 */
+#define HNS_ROCE_EQC_REPORT_TIMER_S 0
+#define HNS_ROCE_EQC_REPORT_TIMER_M GENMASK(31, 0)
+
+/* WORD4 */
+#define HNS_ROCE_EQC_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD5 */
+#define HNS_ROCE_EQC_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_EQE_BA_H_M GENMASK(28, 0)
+
+/* WORD6 */
+#define HNS_ROCE_EQC_SHIFT_S 0
+#define HNS_ROCE_EQC_SHIFT_M GENMASK(7, 0)
+
+#define HNS_ROCE_EQC_MSI_INDX_S 8
+#define HNS_ROCE_EQC_MSI_INDX_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_S 16
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_M GENMASK(31, 16)
+
+/* WORD7 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_M GENMASK(31, 0)
+
+/* WORD8 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_CONS_INDX_S 8
+#define HNS_ROCE_EQC_CONS_INDX_M GENMASK(31, 8)
+
+/* WORD9 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD10 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
+
+#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
+#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
+
+#define HNS_ROCE_V2_EQ_DB_CMD_S 16
+#define HNS_ROCE_V2_EQ_DB_CMD_M GENMASK(17, 16)
+
+#define HNS_ROCE_V2_EQ_DB_TAG_S 0
+#define HNS_ROCE_V2_EQ_DB_TAG_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_EQ_DB_PARA_S 0
+#define HNS_ROCE_V2_EQ_DB_PARA_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+
#endif
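Aside: the _S/_M pairs defined throughout this header are shift/mask descriptors for packing fields into the 32-bit EQ context words. The hns driver consumes them through its own roce_set_field() helper; the standalone sketch below only shows the generic pattern such pairs imply (illustrative, not the driver's actual helper):

/*
 * Generic shift/mask field access, mirroring the _S/_M macro style
 * above. GENMASK is redefined locally so this compiles standalone.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

#define EQC_EQN_S 8
#define EQC_EQN_M GENMASK(15, 8)

static void set_field(uint32_t *word, uint32_t mask, int shift, uint32_t val)
{
	*word = (*word & ~mask) | ((val << shift) & mask);
}

static uint32_t get_field(uint32_t word, uint32_t mask, int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	uint32_t byte_4 = 0;

	set_field(&byte_4, EQC_EQN_M, EQC_EQN_S, 5);
	printf("eqn = %u\n", (unsigned)get_field(byte_4, EQC_EQN_M, EQC_EQN_S));
	return 0;
}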
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index cf02ac2d3596..aa0c242ddc50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -748,12 +748,10 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
goto error_failed_cmd_init;
}
- if (hr_dev->cmd_mod) {
- ret = hns_roce_init_eq_table(hr_dev);
- if (ret) {
- dev_err(dev, "eq init failed!\n");
- goto error_failed_eq_table;
- }
+ ret = hr_dev->hw->init_eq(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
}
if (hr_dev->cmd_mod) {
@@ -805,8 +803,7 @@ error_failed_init_hem:
hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
error_failed_eq_table:
hns_roce_cmd_cleanup(hr_dev);
@@ -837,8 +834,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev);
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 49586ec8126a..4414cea9ef56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -65,6 +65,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_event);
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
enum hns_roce_event type)
@@ -454,6 +455,13 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
+ /* UD send WQEs place their SGEs in the extended SGE space */
+ if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ hr_qp->sq.max_gs);
+ hr_qp->sge.sge_shift = 4;
+ }
+
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -493,6 +501,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
int ret = 0;
u32 page_shift;
u32 npages;
+ int i;
mutex_init(&hr_qp->mutex);
spin_lock_init(&hr_qp->sq.lock);
@@ -500,6 +509,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->state = IB_QPS_RESET;
+ hr_qp->ibqp.qp_type = init_attr->qp_type;
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
else
@@ -512,18 +523,48 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ /* allocate recv inline buf */
+ hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
+ sizeof(struct hns_roce_rinl_wqe),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
+
+ /* First, allocate one contiguous buffer holding every WQE's SGE list */
+ hr_qp->rq_inl_buf.wqe_list[0].sg_list =
+ kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
+ init_attr->cap.max_recv_sge *
+ sizeof(struct hns_roce_rinl_sge),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
+ ret = -ENOMEM;
+ goto err_wqe_list;
+ }
+
+ for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
+ /* Then point each WQE's sg_list at its slice of that buffer */
+ hr_qp->rq_inl_buf.wqe_list[i].sg_list =
+ &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
+ init_attr->cap.max_recv_sge];
+ }
+
if (ib_pd->uobject) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n");
ret = -EFAULT;
- goto err_out;
+ goto err_rq_sge_list;
}
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
@@ -532,7 +573,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -566,13 +607,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
/* Set SQ size */
@@ -580,7 +621,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
- goto err_out;
+ goto err_rq_sge_list;
}
/* QP doorbell register address */
@@ -596,7 +637,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
&hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -678,6 +719,14 @@ err_buf:
else
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+err_rq_sge_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+
+err_wqe_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+
err_out:
return ret;
}
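Aside: the RQ inline buffer setup earlier in this function allocates one contiguous SGE array and points every wqe_list[i].sg_list at its own slice, which is why the error path only kfrees wqe_list[0].sg_list. A standalone sketch of that single-allocation pattern (types are illustrative stand-ins):

/*
 * One calloc() backs all per-WQE SGE lists; freeing slot 0 frees
 * everything. Illustrative userspace sketch, not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

struct sge { unsigned long addr; unsigned int len; };
struct wqe { struct sge *sg_list; };

int main(void)
{
	unsigned int wqe_cnt = 4, max_sge = 2, i;
	struct wqe *wqes = calloc(wqe_cnt, sizeof(*wqes));

	if (!wqes)
		return 1;

	wqes[0].sg_list = calloc((size_t)wqe_cnt * max_sge,
				 sizeof(*wqes[0].sg_list));
	if (!wqes[0].sg_list) {
		free(wqes);
		return 1;
	}

	for (i = 1; i < wqe_cnt; i++)
		wqes[i].sg_list = &wqes[0].sg_list[i * max_sge];

	printf("wqe 2 sg_list offset = %td SGEs\n",
	       wqes[2].sg_list - wqes[0].sg_list);

	free(wqes[0].sg_list);	/* frees every WQE's slice at once */
	free(wqes);
	return 0;
}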
@@ -724,8 +773,13 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
+
+ /* for hw v1, the GSI QP number is derived from the phy port */
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
+ else
+ hr_qp->ibqp.qp_num = 1;
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp->ibqp.qp_num, hr_qp);
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index f6d20ba88c03..2962979c06e9 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -5,4 +5,3 @@ config INFINIBAND_I40IW
select GENERIC_ALLOCATOR
---help---
Intel(R) Ethernet X722 iWARP Driver
- INET && I40IW && INFINIBAND && I40E
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 4ae9131b6350..bcddd7061fc0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -587,5 +587,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
int i40iw_net_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 77870f9e1736..abf4cd897849 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -92,14 +92,9 @@ void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
{
u8 encoded_ird_size;
- u8 pof2_cm_ird = 1;
-
- /* round-off to next powerof2 */
- while (pof2_cm_ird < cm_ird)
- pof2_cm_ird *= 2;
/* ird_size field is encoded in qp_ctx */
- switch (pof2_cm_ird) {
+ switch (cm_ird ? roundup_pow_of_two(cm_ird) : 0) {
case I40IW_HW_IRD_SETTING_64:
encoded_ird_size = 3;
break;
@@ -125,13 +120,16 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
* @conn_ird: connection IRD
* @conn_ord: connection ORD
*/
-static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
+static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
{
if (conn_ird > I40IW_MAX_IRD_SIZE)
conn_ird = I40IW_MAX_IRD_SIZE;
if (conn_ord > I40IW_MAX_ORD_SIZE)
conn_ord = I40IW_MAX_ORD_SIZE;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
cm_node->ird_size = conn_ird;
cm_node->ord_size = conn_ord;
@@ -2878,15 +2876,13 @@ static struct i40iw_cm_listener *i40iw_make_listen_node(
* i40iw_create_cm_node - make a connection node with params
* @cm_core: cm's core
* @iwdev: iwarp device structure
- * @private_data_len: len to provate data for mpa request
- * @private_data: pointer to private data for connection
+ * @conn_param: upper layer connection parameters
* @cm_info: quad info for connection
*/
static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_core *cm_core,
struct i40iw_device *iwdev,
- u16 private_data_len,
- void *private_data,
+ struct iw_cm_conn_param *conn_param,
struct i40iw_cm_info *cm_info)
{
struct i40iw_cm_node *cm_node;
@@ -2894,6 +2890,9 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_node *loopback_remotenode;
struct i40iw_cm_info loopback_cm_info;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
/* create a CM connection node */
cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
if (!cm_node)
@@ -2902,6 +2901,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
loopback_remotelistener = i40iw_find_listener(
cm_core,
@@ -2935,6 +2936,10 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
private_data_len);
loopback_remotenode->pdata.size = private_data_len;
+ if (loopback_remotenode->ord_size > cm_node->ird_size)
+ loopback_remotenode->ord_size =
+ cm_node->ird_size;
+
cm_node->state = I40IW_CM_STATE_OFFLOADED;
cm_node->tcp_cntxt.rcv_nxt =
loopback_remotenode->tcp_cntxt.loc_seq_num;
@@ -3691,7 +3696,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status =
i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
if (status)
@@ -3815,9 +3820,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
__func__, cm_id->tos, cm_info.user_pri);
cm_id->add_ref(cm_id);
cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
- conn_param->private_data_len,
- (void *)conn_param->private_data,
- &cm_info);
+ conn_param, &cm_info);
if (IS_ERR(cm_node)) {
ret = PTR_ERR(cm_node);
@@ -3849,11 +3852,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
cm_node->apbvt_set = true;
- i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
- if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
- !cm_node->ord_size)
- cm_node->ord_size = 1;
-
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
@@ -4058,7 +4056,7 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
0);
if (status)
@@ -4242,10 +4240,16 @@ set_qhash:
}
/**
- * i40iw_cm_disconnect_all - disconnect all connected qp's
+ * i40iw_cm_teardown_connections - teardown QPs
* @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @nfo: connection info (carries the vlan_id and ipv4 flag)
+ * @disconnect_all: flag indicating disconnect all QPs
+ * teardown QPs whose source or destination addr matches ip addr
*/
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all)
{
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
struct list_head *list_core_temp;
@@ -4259,8 +4263,13 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
cm_node = container_of(list_node, struct i40iw_cm_node, list);
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->connected_entry, &connected_list);
+ if (disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
+ !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->connected_entry, &connected_list);
+ }
}
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
@@ -4294,6 +4303,9 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
enum i40iw_quad_hash_manage_type op =
ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+ nfo.vlan_id = vlan_id;
+ nfo.ipv4 = ipv4;
+
/* Disable or enable qhash for listeners */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
@@ -4303,8 +4315,6 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
memcpy(nfo.loc_addr, listen_node->loc_addr,
sizeof(nfo.loc_addr));
nfo.loc_port = listen_node->loc_port;
- nfo.ipv4 = listen_node->ipv4;
- nfo.vlan_id = listen_node->vlan_id;
nfo.user_pri = listen_node->user_pri;
if (!list_empty(&listen_node->child_listen_list)) {
i40iw_qhash_ctrl(iwdev,
@@ -4326,7 +4336,7 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
}
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- /* disconnect any connected qp's on ifdown */
+ /* teardown connected qp's on ifdown */
if (!ifup)
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 0d5840d2c4fc..cf60c451e071 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -276,8 +276,6 @@ struct i40iw_cm_tcp_context {
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
-
- struct timeval sent_ts;
};
enum i40iw_cm_listener_state {
@@ -337,7 +335,7 @@ struct i40iw_cm_node {
u16 mpav2_ird_ord;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct i40iw_cm_listener *listener;
int apbvt_set;
int accept_pend;
@@ -455,5 +453,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index da9821a10e0d..c74fd3309b93 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1893,8 +1893,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
u32 count)
{
- if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
- return I40IW_ERR_INVALID_SIZE;
if (dev->is_pf)
i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
@@ -3872,7 +3870,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
struct i40iw_virt_mem virt_mem;
u32 i, mem_size;
u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
- u32 powerof2;
u64 sd_needed;
u32 loop_count = 0;
@@ -3928,8 +3925,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
+ roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
+ roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
@@ -3945,16 +3944,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
if ((loop_count > 1000) ||
((!(loop_count % 10)) &&
(qpwanted > qpwantedoriginal * 2 / 3))) {
- if (qpwanted > FPM_MULTIPLIER) {
- qpwanted -= FPM_MULTIPLIER;
- powerof2 = 1;
- while (powerof2 < qpwanted)
- powerof2 *= 2;
- powerof2 /= 2;
- qpwanted = powerof2;
- } else {
- qpwanted /= 2;
- }
+ if (qpwanted > FPM_MULTIPLIER)
+ qpwanted = roundup_pow_of_two(qpwanted -
+ FPM_MULTIPLIER);
+ qpwanted >>= 1;
}
if (mrwanted > FPM_MULTIPLIER * 10)
mrwanted -= FPM_MULTIPLIER * 10;
@@ -3962,8 +3955,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
pblewanted -= FPM_MULTIPLIER * 1000;
} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
- sd_needed = i40iw_est_sd(dev, hmc_info);
-
i40iw_debug(dev, I40IW_DEBUG_HMC,
"loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
loop_count, sd_needed,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 029083cb81d5..4b65e4140bd7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -97,6 +97,7 @@
#define RDMA_OPCODE_MASK 0x0f
#define RDMA_READ_REQ_OPCODE 1
#define Q2_BAD_FRAME_OFFSET 72
+#define Q2_FPSN_OFFSET 64
#define CQE_MAJOR_DRV 0x8000
#define I40IW_TERM_SENT 0x01
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index e96bdafbcbb3..61540e14e4b9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -385,6 +385,8 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
}
break;
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ break;
case I40IW_AE_PRIV_OPERATION_DENIED:
case I40IW_AE_STAG_ZERO_INVALID:
case I40IW_AE_IB_RREQ_AND_Q1_FULL:
@@ -403,7 +405,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
case I40IW_AE_LLP_SYN_RECEIVED:
case I40IW_AE_LLP_TOO_MANY_RETRIES:
- case I40IW_AE_LLP_DOUBT_REACHABILITY:
case I40IW_AE_LCE_QP_CATASTROPHIC:
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e824296713e2..b08862978de8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,6 +99,10 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
+static struct notifier_block i40iw_netdevice_notifier = {
+ .notifier_call = i40iw_netdevice_event
+};
+
/**
* i40iw_find_i40e_handler - find a handler given a client info
* @ldev: pointer to a client info
@@ -483,6 +487,7 @@ static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ info.add_sd_cnt = 0;
status = i40iw_create_hmc_obj_type(dev, &info);
if (status) {
i40iw_pr_err("create obj type %d status = %d\n",
@@ -607,7 +612,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
/* init the waitq of the cqp_requests and add them to the list */
- for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
+ for (i = 0; i < sqsize; i++) {
init_waitqueue_head(&cqp->cqp_requests[i].waitq);
list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
}
@@ -1285,7 +1290,7 @@ static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
__LINE__, statuscpu2);
if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
break; /* SUCCESS */
- mdelay(1000);
+ msleep(1000);
retrycount++;
} while (retrycount < 14);
i40iw_wr32(hw, 0xb4040, 0x4C104C5);
@@ -1393,6 +1398,7 @@ static void i40iw_register_notifiers(void)
register_inetaddr_notifier(&i40iw_inetaddr_notifier);
register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
register_netevent_notifier(&i40iw_net_notifier);
+ register_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1404,6 +1410,7 @@ static void i40iw_unregister_notifiers(void)
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1793,7 +1800,7 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
if (reset)
iwdev->reset = true;
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
destroy_workqueue(iwdev->virtchnl_wq);
i40iw_deinit_device(iwdev);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 796a815b53fd..4c21197830b3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -48,7 +48,6 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
*rsrc, bool initial);
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
* i40iw_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -1378,7 +1377,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
u32 rcv_wnd = hw_host_ctx[23];
/* first partial seq # in q2 */
- u32 fps = qp->q2_buf[16];
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
struct list_head *rxlist = &pfpdu->rxlist;
struct list_head *plist;
@@ -1483,7 +1482,7 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
* @ieq: ieq resource
* @qp: all pending fpdu buffers
*/
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
struct i40iw_puda_buf *buf;
struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index 660aa3edae56..53a7d58c84b5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -184,4 +184,5 @@ enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 3ec5389a81a1..8afa5a67a86b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -894,20 +894,6 @@ exit:
}
/**
- * i40iw_qp_roundup - return round up QP WQ depth
- * @wqdepth: WQ depth in quantas to round up
- */
-static int i40iw_qp_round_up(u32 wqdepth)
-{
- int scount = 1;
-
- for (wqdepth--; scount <= 16; scount *= 2)
- wqdepth |= wqdepth >> scount;
-
- return ++wqdepth;
-}
-
-/**
* i40iw_get_wqe_shift - get shift count for maximum wqe size
* @sge: Maximum Scatter Gather Elements wqe
* @inline_data: Maximum inline data size
@@ -934,7 +920,7 @@ void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
*/
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
{
- *sqdepth = i40iw_qp_round_up((sq_size << shift) + I40IW_SQ_RSVD);
+ *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);
if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
@@ -953,7 +939,7 @@ enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
*/
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
{
- *rqdepth = i40iw_qp_round_up((rq_size << shift) + I40IW_RQ_RSVD);
+ *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);
if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
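Aside: replacing the removed i40iw_qp_round_up() with roundup_pow_of_two() is behavior-preserving for the depths used here (inputs >= 1). A quick standalone check of the equivalence between the old bit-smearing loop and a generic round-up (illustrative, not kernel code):

/*
 * Compare the removed bit-smearing round-up against a plain
 * round-up-to-power-of-two over a range of inputs.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t smear_round_up(uint32_t wqdepth)
{
	int scount;

	for (wqdepth--, scount = 1; scount <= 16; scount *= 2)
		wqdepth |= wqdepth >> scount;
	return ++wqdepth;
}

static uint32_t generic_round_up(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t v;

	for (v = 1; v < 100000; v++)
		if (smear_round_up(v) != generic_round_up(v))
			printf("mismatch at %u\n", v);
	return 0;
}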
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index e73efc59a0ab..b125925641e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -59,7 +59,6 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_CEQ_ENTRIES = 131071,
I40IW_MIN_CQ_SIZE = 1,
I40IW_MAX_CQ_SIZE = 1048575,
- I40IW_MAX_AEQ_ALLOCATE_COUNT = 255,
I40IW_DB_ID_ZERO = 0,
I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
@@ -72,7 +71,7 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
- I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_IRD_SIZE = 64,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 8845dba7c438..ddc1056b0b4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -137,7 +137,7 @@ inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
}
/**
- * i40iw_inetaddr_event - system notifier for netdev events
+ * i40iw_inetaddr_event - system notifier for ipv4 addr events
* @notfier: not used
* @event: event for notifier
* @ptr: if address
@@ -200,7 +200,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
}
/**
- * i40iw_inet6addr_event - system notifier for ipv6 netdev events
+ * i40iw_inet6addr_event - system notifier for ipv6 addr events
* @notfier: not used
* @event: event for notifier
* @ptr: if address
@@ -252,7 +252,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
}
/**
- * i40iw_net_event - system notifier for net events
+ * i40iw_net_event - system notifier for netevents
* @notfier: not used
* @event: event for notifier
* @ptr: neighbor
@@ -297,6 +297,50 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
}
/**
+ * i40iw_netdevice_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *event_netdev;
+ struct net_device *netdev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
+ return NOTIFY_DONE;
+
+ netdev = iwdev->ldev->netdev;
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ iwdev->iw_status = 1;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ /* Fall through */
+ case NETDEV_UP:
+ i40iw_port_ibevent(iwdev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
* i40iw_get_cqp_request - get cqp struct
* @cqp: device cqp ptr
* @wait: cqp to be used in wait mode
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 3c6f3ce88f89..70024e8e2692 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -412,6 +412,7 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
{
struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+ i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
@@ -1637,6 +1638,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
err_code = -EOVERFLOW;
goto err;
}
+ stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
iwmr->stag = stag;
iwmr->ibmr.rkey = stag;
iwmr->ibmr.lkey = stag;
@@ -2242,14 +2244,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
}
@@ -2271,7 +2271,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op_type = I40IW_OP_TYPE_RDMA_READ;
info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index bf4f14a1b4fc..9a566ee3ceff 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -170,7 +170,7 @@ err_buf:
return err;
}
-#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
+#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -246,7 +246,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
cq->db.dma, &cq->mcq, vector, 0,
- !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
if (err)
goto err_dbmap;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8c8a16791a3f..8d2ee9322f2e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -589,6 +589,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (props->rss_caps.supported_qpts) {
resp.rss_caps.rx_hash_function =
MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+
resp.rss_caps.rx_hash_fields_mask =
MLX4_IB_RX_HASH_SRC_IPV4 |
MLX4_IB_RX_HASH_DST_IPV4 |
@@ -598,6 +599,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
MLX4_IB_RX_HASH_DST_PORT_UDP;
+
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ resp.rss_caps.rx_hash_fields_mask |=
+ MLX4_IB_RX_HASH_INNER;
}
}
@@ -2995,9 +3001,8 @@ err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (i = 0; i < ibdev->num_ports; ++i)
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
@@ -3102,11 +3107,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
- }
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index caf490ab24c8..f045491f2c14 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -734,10 +734,24 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /*
+ * Hash according to inner headers if exist, otherwise
+ * according to outer headers.
+ */
+ rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
+ } else {
+ pr_debug("RSS Hash for inner headers isn't supported\n");
+ return (-EOPNOTSUPP);
+ }
+ }
+
return 0;
}
-static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
+static int create_qp_rss(struct mlx4_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx4_ib_create_qp_rss *ucmd,
struct mlx4_ib_qp *qp)
@@ -860,7 +874,7 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- err = create_qp_rss(to_mdev(pd->device), pd, init_attr, &ucmd, qp);
+ err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1836,6 +1850,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
mlx4_ib_gid_index_to_real_index(dev, port,
grh->sgid_index);
+ if (real_sgid_index < 0)
+ return real_sgid_index;
if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
pr_err("sgid_index (%u) too large. max is %d\n",
real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 2d32b519bb61..985fa2637390 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -247,21 +247,30 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
}
}
-static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 *var)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
void *out;
void *field;
int err;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+ if (!out) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
node = mlx5_ib_param_to_node(offset);
- err = mlx5_cmd_query_cong_params(dev->mdev, node, out, outlen);
+ err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
if (err)
goto free;
@@ -270,21 +279,32 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
free:
kvfree(out);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
-static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 var)
{
int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
void *in;
void *field;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
u32 attr_mask = 0;
int err;
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (!in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
MLX5_SET(modify_cong_params_in, in, opcode,
MLX5_CMD_OP_MODIFY_CONG_PARAMS);
@@ -299,8 +319,10 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
- err = mlx5_cmd_modify_cong_params(dev->mdev, in, inlen);
+ err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
kvfree(in);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
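Aside: the mlx5 changes above all follow the same acquire/release discipline: take the native-port mdev reference first, unwind through goto labels on failure, and put the reference back on every path. A hedged standalone sketch of that idiom (all names are illustrative stand-ins, not mlx5 API):

/*
 * Acquire a reference up front, jump to a label on failure, and
 * release on every exit path. Userspace illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

static int res_taken;                     /* stand-in for the mdev ref */

static int acquire_res(void) { res_taken = 1; return 1; }
static void release_res(void) { res_taken = 0; }

static int do_work(void)
{
	void *buf;
	int err = 0;

	if (!acquire_res())
		return -19;               /* -ENODEV */

	buf = malloc(64);
	if (!buf) {
		err = -12;                /* -ENOMEM */
		goto alloc_err;
	}

	/* ... command execution would go here ... */
	free(buf);

alloc_err:
	release_res();                    /* released on every path */
	return err;
}

int main(void)
{
	printf("do_work -> %d, res_taken = %d\n", do_work(), res_taken);
	return 0;
}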
@@ -324,7 +346,7 @@ static ssize_t set_param(struct file *filp, const char __user *buf,
if (kstrtou32(lbuf, 0, &var))
return -EINVAL;
- ret = mlx5_ib_set_cc_params(param->dev, offset, var);
+ ret = mlx5_ib_set_cc_params(param->dev, param->port_num, offset, var);
return ret ? ret : count;
}
@@ -340,7 +362,7 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
if (*pos)
return 0;
- ret = mlx5_ib_get_cc_params(param->dev, offset, &var);
+ ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
if (ret)
return ret;
@@ -362,44 +384,51 @@ static const struct file_operations dbg_cc_fops = {
.read = get_param,
};
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev)
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
if (!mlx5_debugfs_root ||
- !dev->dbg_cc_params ||
- !dev->dbg_cc_params->root)
+ !dev->port[port_num].dbg_cc_params ||
+ !dev->port[port_num].dbg_cc_params->root)
return;
- debugfs_remove_recursive(dev->dbg_cc_params->root);
- kfree(dev->dbg_cc_params);
- dev->dbg_cc_params = NULL;
+ debugfs_remove_recursive(dev->port[port_num].dbg_cc_params->root);
+ kfree(dev->port[port_num].dbg_cc_params);
+ dev->port[port_num].dbg_cc_params = NULL;
}
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_core_dev *mdev;
int i;
if (!mlx5_debugfs_root)
goto out;
- if (!MLX5_CAP_GEN(dev->mdev, cc_query_allowed) ||
- !MLX5_CAP_GEN(dev->mdev, cc_modify_allowed))
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
goto out;
+ if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
+ !MLX5_CAP_GEN(mdev, cc_modify_allowed))
+ goto put_mdev;
+
dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
if (!dbg_cc_params)
- goto out;
+ goto err;
- dev->dbg_cc_params = dbg_cc_params;
+ dev->port[port_num].dbg_cc_params = dbg_cc_params;
dbg_cc_params->root = debugfs_create_dir("cc_params",
- dev->mdev->priv.dbg_root);
+ mdev->priv.dbg_root);
if (!dbg_cc_params->root)
goto err;
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
dbg_cc_params->params[i].dentry =
debugfs_create_file(mlx5_ib_dbg_cc_name[i],
0600, dbg_cc_params->root,
@@ -408,11 +437,17 @@ int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
if (!dbg_cc_params->params[i].dentry)
goto err;
}
-out: return 0;
+
+put_mdev:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+out:
+ return 0;
err:
mlx5_ib_warn(dev, "cong debugfs failure\n");
- mlx5_ib_cleanup_cong_debugfs(dev);
+ mlx5_ib_cleanup_cong_debugfs(dev, port_num);
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+
/*
* We don't want to fail driver if debugfs failed to initialize,
* so we are not forwarding error to the user.
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 18705cbcdc8c..5b974fb97611 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1010,7 +1010,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
- if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1003b0133a49..32a9e9228b13 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -197,10 +197,9 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
-static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
int err;
void *out_cnt;
@@ -222,7 +221,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
+ err = mlx5_core_query_vport_counter(mdev, 0, 0,
port_num, out_cnt, sz);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
@@ -235,7 +234,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
+ err = mlx5_core_query_ib_ppcnt(mdev, port_num,
out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
@@ -255,9 +254,11 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
+ int ret;
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)))
@@ -265,14 +266,20 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
memset(out_mad->data, 0, sizeof(out_mad->data));
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return IB_MAD_RESULT_FAILURE;
+
if (MLX5_CAP_GEN(mdev, vport_counters) &&
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+ ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
} else {
- return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
in_mad, out_mad);
}
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return ret;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
@@ -519,7 +526,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
int ext_active_speed;
int err = -ENOMEM;
- if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+ if (port < 1 || port > dev->num_ports) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 262c1aa2e028..4236c8086820 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -50,16 +50,14 @@
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
-#include <linux/mlx5/fs.h>
-#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
-#include <linux/mlx5/vport.h>
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"
@@ -72,10 +70,36 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION "\n";
+struct mlx5_ib_event_work {
+ struct work_struct work;
+ struct mlx5_core_dev *dev;
+ void *context;
+ enum mlx5_dev_event event;
+ unsigned long param;
+};
+
enum {
MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};
+static struct workqueue_struct *mlx5_ib_event_wq;
+static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
+static LIST_HEAD(mlx5_ib_dev_list);
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
+
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
+{
+ struct mlx5_ib_dev *dev;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ dev = mpi->ibdev;
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return dev;
+}
+
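/*
 * [Editorial sketch, not part of this patch.] The mlx5_ib_multiport_info
 * object used throughout these hunks is declared in mlx5_ib.h elsewhere in
 * this series; its approximate shape, reproduced here for context (the
 * field set is inferred from how it is used below):
 */
struct mlx5_ib_multiport_info {
	struct list_head list;		/* entry on the unaffiliated list */
	struct mlx5_ib_dev *ibdev;	/* IB device the port is bound to */
	struct mlx5_core_dev *mdev;	/* core device backing this port */
	struct completion unref_comp;	/* completed as mdev_refcnt drains */
	u64 sys_image_guid;		/* matches slave ports to a master */
	u32 mdev_refcnt;		/* outstanding get_native_port refs */
	bool is_master;
	bool unaffiliate;		/* set while the bind is torn down */
};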
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
@@ -115,24 +139,32 @@ static int get_port_state(struct ib_device *ibdev,
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
- roce.nb);
+ u8 port_num = roce->native_port_num;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = roce->dev;
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
- write_lock(&ibdev->roce.netdev_lock);
- if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
- ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
- NULL : ndev;
- write_unlock(&ibdev->roce.netdev_lock);
+ write_lock(&roce->netdev_lock);
+
+ if (ndev->dev.parent == &mdev->pdev->dev)
+ roce->netdev = (event == NETDEV_UNREGISTER) ?
+ NULL : ndev;
+ write_unlock(&roce->netdev_lock);
break;
case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
- struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
struct net_device *upper = NULL;
if (lag_ndev) {
@@ -140,27 +172,28 @@ static int mlx5_netdev_event(struct notifier_block *this,
dev_put(lag_ndev);
}
- if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
+ if ((upper == ndev || (!upper && ndev == roce->netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
enum ib_port_state port_state;
- if (get_port_state(&ibdev->ib_dev, 1, &port_state))
- return NOTIFY_DONE;
+ if (get_port_state(&ibdev->ib_dev, port_num,
+ &port_state))
+ goto done;
- if (ibdev->roce.last_port_state == port_state)
- return NOTIFY_DONE;
+ if (roce->last_port_state == port_state)
+ goto done;
- ibdev->roce.last_port_state = port_state;
+ roce->last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
if (port_state == IB_PORT_DOWN)
ibev.event = IB_EVENT_PORT_ERR;
else if (port_state == IB_PORT_ACTIVE)
ibev.event = IB_EVENT_PORT_ACTIVE;
else
- return NOTIFY_DONE;
+ goto done;
- ibev.element.port_num = 1;
+ ibev.element.port_num = port_num;
ib_dispatch_event(&ibev);
}
break;
@@ -169,7 +202,8 @@ static int mlx5_netdev_event(struct notifier_block *this,
default:
break;
}
-
+done:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return NOTIFY_DONE;
}
@@ -178,22 +212,88 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
{
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
+ struct mlx5_core_dev *mdev;
- ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NULL;
+
+ ndev = mlx5_lag_get_roce_netdev(mdev);
if (ndev)
- return ndev;
+ goto out;
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
- read_lock(&ibdev->roce.netdev_lock);
- ndev = ibdev->roce.netdev;
+ read_lock(&ibdev->roce[port_num - 1].netdev_lock);
+ ndev = ibdev->roce[port_num - 1].netdev;
if (ndev)
dev_hold(ndev);
- read_unlock(&ibdev->roce.netdev_lock);
+ read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+out:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return ndev;
}
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
+ u8 ib_port_num,
+ u8 *native_port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ ib_port_num);
+ struct mlx5_core_dev *mdev = NULL;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (native_port_num)
+ *native_port_num = 1;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return ibdev->mdev;
+
+ port = &ibdev->port[ib_port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[ib_port_num - 1].mp.mpi;
+ if (mpi && !mpi->unaffiliate) {
+ mdev = mpi->mdev;
+ /* If it's the master, no need to refcount; it will exist
+ * as long as the ib_dev does.
+ */
+ if (!mpi->is_master)
+ mpi->mdev_refcnt++;
+ }
+ spin_unlock(&port->mp.mpi_lock);
+
+ return mdev;
+}
+
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ port_num);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ port = &ibdev->port[port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[port_num - 1].mp.mpi;
+ if (mpi->is_master)
+ goto out;
+
+ mpi->mdev_refcnt--;
+ if (mpi->unaffiliate)
+ complete(&mpi->unref_comp);
+out:
+ spin_unlock(&port->mp.mpi_lock);
+}
+
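/*
 * [Editorial sketch, not part of this patch.] Every consumer of the mdev
 * returned by mlx5_ib_get_native_port_mdev() is expected to bracket its
 * use with the get/put pair above; a hypothetical caller (example_query()
 * and do_hw_query() are illustrative names only):
 */
static int example_query(struct mlx5_ib_dev *dev, u8 ib_port_num)
{
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, ib_port_num, &mdev_port_num);
	if (!mdev)
		return -ENODEV;	/* port not affiliated yet */

	/* Placeholder for the real per-port hardware access */
	err = do_hw_query(mdev, mdev_port_num);

	mlx5_ib_put_native_port_mdev(dev, ib_port_num);
	return err;
}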
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
@@ -256,19 +356,33 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
+ bool put_mdev = true;
u16 qkey_viol_cntr;
u32 eth_prot_oper;
+ u8 mdev_port_num;
int err;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+ /* This means the port isn't affiliated yet. Get the
+ * info for the master port instead.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ port_num = 1;
+ }
+
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
- err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
+ err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
+ mdev_port_num);
if (err)
- return err;
+ goto out;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
@@ -284,12 +398,16 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
props->state = IB_PORT_DOWN;
props->phys_state = 3;
- mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
props->qkey_viol_cntr = qkey_viol_cntr;
+ /* If this is a stub query for an unaffiliated port, stop here */
+ if (!put_mdev)
+ goto out;
+
ndev = mlx5_ib_get_netdev(device, port_num);
if (!ndev)
- return 0;
+ goto out;
if (mlx5_lag_is_active(dev->mdev)) {
rcu_read_lock();
@@ -312,7 +430,10 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
dev_put(ndev);
props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
- return 0;
+out:
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
}
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
@@ -354,7 +475,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
roce_l3_type, gid->raw, mac, vlan,
- vlan_id);
+ vlan_id, port_num);
}
static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -438,11 +559,11 @@ static int mlx5_get_vport_access_method(struct ib_device *ibdev)
}
static void get_atomic_caps(struct mlx5_ib_dev *dev,
+ u8 atomic_size_qp,
struct ib_device_attr *props)
{
u8 tmp;
u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
u8 atomic_req_8B_endianness_mode =
MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
@@ -459,6 +580,29 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev,
}
}
+static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
+{
+ struct ib_device_attr props = {};
+
+ get_atomic_caps_dc(dev, &props);
+ return props.atomic_cap == IB_ATOMIC_HCA;
+}
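/*
 * [Editorial note, not part of this patch.] atomic_size_qp and
 * atomic_size_dc are bitmasks in which bit n set means 2^n-byte atomic
 * operands are supported, so MLX5_ATOMIC_SIZE_QP_8BYTES (1 << 3, see the
 * enum near the top of this file) is the 8-byte bit. A hedged sketch of
 * checking one transport's mask directly (example_supports_8b_atomics()
 * is an illustrative name only):
 */
static bool example_supports_8b_atomics(struct mlx5_ib_dev *dev, bool dc)
{
	u8 sizes = dc ? MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc) :
			MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	return sizes & MLX5_ATOMIC_SIZE_QP_8BYTES;	/* bit 3 == 8 bytes */
}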
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -587,6 +731,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ bool raw_support = !mlx5_core_mp_enabled(mdev);
struct mlx5_ib_query_device_resp resp = {};
size_t resp_len;
u64 max_tso;
@@ -650,7 +795,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
if (MLX5_CAP_ETH(mdev, csum_cap)) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
@@ -682,7 +827,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_TCP |
MLX5_RX_HASH_DST_PORT_TCP |
MLX5_RX_HASH_SRC_PORT_UDP |
- MLX5_RX_HASH_DST_PORT_UDP;
+ MLX5_RX_HASH_DST_PORT_UDP |
+ MLX5_RX_HASH_INNER;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -698,7 +844,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
- MLX5_CAP_GEN(dev->mdev, general_notification_event))
+ MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
+ raw_support)
props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
@@ -706,7 +853,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
+ raw_support) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
@@ -746,7 +894,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len =
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
- get_atomic_caps(dev, props);
+ get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
@@ -770,7 +918,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ IB_LINK_LAYER_ETHERNET && raw_support) {
props->rss_caps.max_rwq_indirection_tables =
1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
props->rss_caps.max_rwq_indirection_table_size =
@@ -807,7 +955,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cqe_comp_caps);
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
+ raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
resp.packet_pacing_caps.qp_rate_limit_max =
@@ -866,7 +1015,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
+ raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
@@ -1097,7 +1247,22 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
}
if (!ret && props) {
- count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
+ if (!mdev) {
+ /* If the port isn't affiliated yet, query the master.
+ * The master and slave will have the same values.
+ */
+ mdev = dev->mdev;
+ port = 1;
+ put_mdev = false;
+ }
+ count = mlx5_core_reserved_gids_count(mdev);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
props->gid_tbl_len -= count;
}
return ret;
@@ -1122,20 +1287,43 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
+ u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+ u8 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
+ if (!mdev) {
+ /* The port isn't affiliated yet, get the PKey from the master
+ * port. For RoCE the PKey tables will be the same.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
+ err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
+ index, pkey);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+ return err;
+}
+
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
- return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
- pkey);
+ return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
default:
return -EINVAL;
}
@@ -1174,23 +1362,32 @@ static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
u32 value)
{
struct mlx5_hca_vport_context ctx = {};
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
int err;
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return -ENODEV;
+
+ err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
if (err)
- return err;
+ goto out;
if (~ctx.cap_mask1_perm & mask) {
mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
mask, ctx.cap_mask1_perm);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
ctx.cap_mask1 = value;
ctx.cap_mask1_perm = mask;
- err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
+ 0, &ctx);
+
+out:
+ mlx5_ib_put_native_port_mdev(dev, port_num);
return err;
}
@@ -1241,9 +1438,18 @@ static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}
+static u16 calc_dynamic_bfregs(int uars_per_sys_page)
+{
+ /* Large page with non 4k uar support might limit the dynamic size */
+ if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
+ return MLX5_MIN_DYN_BFREGS;
+
+ return MLX5_MAX_DYN_BFREGS;
+}
+
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
struct mlx5_ib_alloc_ucontext_req_v2 *req,
- u32 *num_sys_pages)
+ struct mlx5_bfreg_info *bfregi)
{
int uars_per_sys_page;
int bfregs_per_sys_page;
@@ -1260,16 +1466,21 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+ /* This holds the static allocation size requested by the user */
req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
- *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
-
if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
return -EINVAL;
- mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
+ bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+ bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
+ bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
+ bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
+
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
lib_uar_4k ? "yes" : "no", ref_bfregs,
- req->total_num_bfregs, *num_sys_pages);
+ req->total_num_bfregs, bfregi->total_num_bfregs,
+ bfregi->num_sys_pages);
return 0;
}
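/*
 * [Editorial worked example, not part of this patch; it assumes
 * MLX5_NON_FP_BFREGS_PER_UAR == 2 and MLX5_MAX_DYN_BFREGS == 1024.]
 * On a 4K-page host without the 4K-UAR optimization, uars_per_sys_page
 * is 1 and bfregs_per_sys_page is 2, so a request of total_num_bfregs = 8
 * works out to:
 *
 *	num_static_sys_pages = 8 / 2          = 4
 *	num_dyn_bfregs       = ALIGN(1024, 2) = 1024
 *	total_num_bfregs     = 8 + 1024       = 1032
 *	num_sys_pages        = 1032 / 2       = 516
 *
 * Pages [0..3] are allocated up front in allocate_uars() below, and pages
 * [4..515] are left as MLX5_IB_INVALID_UAR_INDEX placeholders until a
 * dynamic mmap claims them.
 */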
@@ -1281,13 +1492,17 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
int i;
bfregi = &context->bfregi;
- for (i = 0; i < bfregi->num_sys_pages; i++) {
+ for (i = 0; i < bfregi->num_static_sys_pages; i++) {
err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
if (err)
goto error;
mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
}
+
+ for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
+ bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
+
return 0;
error:
@@ -1306,12 +1521,16 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_sys_pages; i++) {
- err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
- if (err) {
- mlx5_ib_warn(dev, "failed to free uar %d\n", i);
- return err;
+ if (i < bfregi->num_static_sys_pages ||
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
+ err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
+ return err;
+ }
}
}
+
return 0;
}
@@ -1362,6 +1581,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_ucontext *context;
struct mlx5_bfreg_info *bfregi;
int ver;
@@ -1422,13 +1642,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
bfregi = &context->bfregi;
/* updates req->total_num_bfregs */
- err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
goto out_ctx;
mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
- bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
+ bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
@@ -1470,7 +1690,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->db_page_mutex);
resp.tot_bfregs = req.total_num_bfregs;
- resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
+ resp.num_ports = dev->num_ports;
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
@@ -1489,6 +1709,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.eth_min_inline);
}
+ if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
+ if (mdev->clock_info)
+ resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
+ resp.response_length += sizeof(resp.clock_info_versions);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
@@ -1502,8 +1728,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.hca_core_clock_offset =
offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
}
- resp.response_length += sizeof(resp.hca_core_clock_offset) +
- sizeof(resp.reserved2);
+ resp.response_length += sizeof(resp.hca_core_clock_offset);
}
if (field_avail(typeof(resp), log_uar_size, udata->outlen))
@@ -1512,6 +1737,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
resp.response_length += sizeof(resp.num_uars_per_page);
+ if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
+ resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
+ resp.response_length += sizeof(resp.num_dyn_bfregs);
+ }
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_td;
@@ -1566,15 +1796,13 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi,
- int idx)
+ int uar_idx)
{
int fw_uars_per_page;
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
- bfregi->sys_pages[idx] / fw_uars_per_page;
+ return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
+        uar_idx / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -1592,6 +1820,12 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+/* Index resides in an extra byte to allow values larger than 255 */
+static int get_extended_index(unsigned long offset)
+{
+ return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
+}
+
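/*
 * [Editorial sketch, not part of this patch.] Assuming get_command() and
 * get_arg() use 8-bit fields, the mmap pgoff is laid out as:
 *
 *	bits  0..7	low 8 bits of the index (get_arg)
 *	bits  8..15	mmap command (get_command)
 *	bits 16..23	high 8 bits of the index (get_extended_index, new)
 *
 * so a command plus 16-bit index would be encoded roughly as:
 */
static unsigned long example_encode_pgoff(int cmd, u16 index)
{
	return ((unsigned long)(index >> 8) << 16) |	/* high index byte */
	       ((unsigned long)cmd << 8) |		/* command */
	       (index & 0xff);				/* low index byte */
}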
static void mlx5_ib_vma_open(struct vm_area_struct *area)
{
/* vma_open is called when a new VMA is created on top of our VMA. This
@@ -1733,6 +1967,38 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
}
}
+static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
+{
+ phys_addr_t pfn;
+ int err;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
+ return -EOPNOTSUPP;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (!dev->mdev->clock_info_page)
+ return -EOPNOTSUPP;
+
+ pfn = page_to_pfn(dev->mdev->clock_info_page);
+ err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (err)
+ return err;
+
+ mlx5_ib_dbg(dev, "mapped clock info at 0x%lx, PA 0x%llx\n",
+ vma->vm_start,
+ (unsigned long long)pfn << PAGE_SHIFT);
+
+ return mlx5_ib_set_vma_data(vma, context);
+}
+
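/*
 * [Editorial sketch, not part of this patch.] A userspace consumer would
 * map the clock info page read-only through the verbs command fd, e.g.
 * (reusing the hypothetical example_encode_pgoff() above; the exact
 * encoding is an assumption):
 *
 *	off_t pgoff = example_encode_pgoff(MLX5_IB_MMAP_CLOCK_INFO,
 *					   MLX5_IB_CLOCK_INFO_V1);
 *	void *p = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
 *		       cmd_fd, pgoff * page_size);
 *
 * Mapping with PROT_WRITE fails with EPERM due to the VM_WRITE check
 * above.
 */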
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
@@ -1742,21 +2008,29 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
unsigned long idx;
phys_addr_t pfn, pa;
pgprot_t prot;
- int uars_per_page;
+ u32 bfreg_dyn_idx = 0;
+ u32 uar_index;
+ int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
+ int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
+ bfregi->num_static_sys_pages;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
- idx = get_index(vma->vm_pgoff);
- if (idx % uars_per_page ||
- idx * uars_per_page >= bfregi->num_sys_pages) {
- mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+ if (dyn_uar)
+ idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
+ else
+ idx = get_index(vma->vm_pgoff);
+
+ if (idx >= max_valid_idx) {
+ mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
+ idx, max_valid_idx);
return -EINVAL;
}
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
if (!pat_enabled())
@@ -1776,7 +2050,40 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
return -EINVAL;
}
- pfn = uar_index2pfn(dev, bfregi, idx);
+ if (dyn_uar) {
+ int uars_per_page;
+
+ uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+ bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
+ if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
+ mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
+ bfreg_dyn_idx, bfregi->total_num_bfregs);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bfregi->lock);
+ /* Fail if this UAR is already allocated; the first bfreg
+ * index of each page holds the page's allocation count.
+ */
+ if (bfregi->count[bfreg_dyn_idx]) {
+ mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
+ mutex_unlock(&bfregi->lock);
+ return -EINVAL;
+ }
+
+ bfregi->count[bfreg_dyn_idx]++;
+ mutex_unlock(&bfregi->lock);
+
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ if (err) {
+ mlx5_ib_warn(dev, "UAR alloc failed\n");
+ goto free_bfreg;
+ }
+ } else {
+ uar_index = bfregi->sys_pages[idx];
+ }
+
+ pfn = uar_index2pfn(dev, uar_index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot;
@@ -1785,14 +2092,32 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
if (err) {
mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
- return -EAGAIN;
+ err = -EAGAIN;
+ goto err;
}
pa = pfn << PAGE_SHIFT;
mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
vma->vm_start, &pa);
- return mlx5_ib_set_vma_data(vma, context);
+ err = mlx5_ib_set_vma_data(vma, context);
+ if (err)
+ goto err;
+
+ if (dyn_uar)
+ bfregi->sys_pages[idx] = uar_index;
+ return 0;
+
+err:
+ if (!dyn_uar)
+ return err;
+
+ mlx5_cmd_free_uar(dev->mdev, uar_index);
+
+free_bfreg:
+ mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
+
+ return err;
}
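/*
 * [Editorial sketch, not part of this patch.] MLX5_IB_MMAP_ALLOC_WC lets
 * userspace allocate a write-combining UAR on demand instead of only at
 * context creation: for a dynamic slot i, the kernel allocates a UAR with
 * mlx5_cmd_alloc_uar() and records it at
 * sys_pages[num_static_sys_pages + i]. A hypothetical userspace call,
 * reusing example_encode_pgoff() from above:
 *
 *	pgoff = example_encode_pgoff(MLX5_IB_MMAP_ALLOC_WC, i);
 *	wc_page = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *		       cmd_fd, pgoff * page_size);
 */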
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -1807,6 +2132,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
@@ -1835,6 +2161,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
vma->vm_start,
(unsigned long long)pfn << PAGE_SHIFT);
break;
+ case MLX5_IB_MMAP_CLOCK_INFO:
+ return mlx5_ib_mmap_clock_info_page(dev, vma, context);
default:
return -EINVAL;
@@ -2663,7 +2991,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOMEM);
if (domain != IB_FLOW_DOMAIN_USER ||
- flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
+ flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
return ERR_PTR(-EINVAL);
@@ -2928,15 +3256,24 @@ static void delay_drop_handler(struct work_struct *work)
mutex_unlock(&delay_drop->lock);
}
-static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param)
+static void mlx5_ib_handle_event(struct work_struct *_work)
{
- struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+ struct mlx5_ib_event_work *work =
+ container_of(_work, struct mlx5_ib_event_work, work);
+ struct mlx5_ib_dev *ibdev;
struct ib_event ibev;
bool fatal = false;
u8 port = 0;
- switch (event) {
+ if (mlx5_core_is_mp_slave(work->dev)) {
+ ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
+ if (!ibdev)
+ goto out;
+ } else {
+ ibdev = work->context;
+ }
+
+ switch (work->event) {
case MLX5_DEV_EVENT_SYS_ERROR:
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
@@ -2946,39 +3283,39 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- port = (u8)param;
+ port = (u8)work->param;
/* In RoCE, port up/down events are handled in
* mlx5_netdev_event().
*/
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET)
- return;
+ goto out;
- ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+ ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break;
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
ibev.event = IB_EVENT_GID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_CLIENT_REREG:
ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3000,9 +3337,26 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (fatal)
ibdev->ib_active = false;
-
out:
- return;
+ kfree(work);
+}
+
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5_ib_event_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->dev = dev;
+ work->param = param;
+ work->context = context;
+ work->event = event;
+
+ queue_work(mlx5_ib_event_wq, &work->work);
}
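/*
 * [Editorial note, not part of this hunk.] mlx5_ib_event() can be invoked
 * from atomic (EQ interrupt) context, hence the GFP_ATOMIC allocation and
 * the deferral of all real work to process context. The workqueue itself
 * is presumably created once at module init, along the lines of (flags
 * assumed):
 *
 *	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq",
 *						   WQ_MEM_RECLAIM);
 *	if (!mlx5_ib_event_wq)
 *		return -ENOMEM;
 *
 * An ordered workqueue keeps events serialized in the order they were
 * raised, which the handlers above rely on.
 */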
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@@ -3011,7 +3365,7 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
+ for (port = 1; port <= dev->num_ports; port++) {
dev->mdev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
@@ -3038,16 +3392,15 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
+ for (port = 1; port <= dev->num_ports; port++)
mlx5_query_ext_port_caps(dev, port);
}
-static int get_port_caps(struct mlx5_ib_dev *dev)
+static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
- int port;
struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
@@ -3068,22 +3421,21 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
goto out;
}
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
- memset(pprops, 0, sizeof(*pprops));
- err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
- if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n",
- port, err);
- break;
- }
- dev->mdev->port_caps[port - 1].pkey_table_len =
- dprops->max_pkeys;
- dev->mdev->port_caps[port - 1].gid_table_len =
- pprops->gid_tbl_len;
- mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
- dprops->max_pkeys, pprops->gid_tbl_len);
+ memset(pprops, 0, sizeof(*pprops));
+ err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
+ if (err) {
+ mlx5_ib_warn(dev, "query_port %d failed %d\n",
+ port, err);
+ goto out;
}
+ dev->mdev->port_caps[port - 1].pkey_table_len =
+ dprops->max_pkeys;
+ dev->mdev->port_caps[port - 1].gid_table_len =
+ pprops->gid_tbl_len;
+ mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
+ port, dprops->max_pkeys, pprops->gid_tbl_len);
+
out:
kfree(pprops);
kfree(dprops);
@@ -3373,12 +3725,14 @@ static u32 get_core_cap_flags(struct ib_device *ibdev)
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
+ bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
u32 ret = 0;
if (ll == IB_LINK_LAYER_INFINIBAND)
return RDMA_CORE_PORT_IBA_IB;
- ret = RDMA_CORE_PORT_RAW_PACKET;
+ if (raw_support)
+ ret = RDMA_CORE_PORT_RAW_PACKET;
if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
return ret;
@@ -3468,33 +3822,33 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- dev->roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce.nb);
+ dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->roce[port_num].nb);
if (err) {
- dev->roce.nb.notifier_call = NULL;
+ dev->roce[port_num].nb.notifier_call = NULL;
return err;
}
return 0;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
- if (dev->roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->roce.nb);
- dev->roce.nb.notifier_call = NULL;
+ if (dev->roce[port_num].nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->roce[port_num].nb);
+ dev->roce[port_num].nb.notifier_call = NULL;
}
}
-static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- err = mlx5_add_netdev_notifier(dev);
+ err = mlx5_add_netdev_notifier(dev, port_num);
if (err)
return err;
@@ -3515,7 +3869,7 @@ err_disable_roce:
mlx5_nic_vport_disable_roce(dev->mdev);
err_unregister_netdevice_notifier:
- mlx5_remove_netdev_notifier(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
return err;
}
@@ -3577,11 +3931,12 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
- unsigned int i;
+ int i;
for (i = 0; i < dev->num_ports; i++) {
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
+ if (dev->port[i].cnts.set_id)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].cnts.set_id);
kfree(dev->port[i].cnts.names);
kfree(dev->port[i].cnts.offsets);
}
@@ -3623,6 +3978,7 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
err_names:
kfree(cnts->names);
+ cnts->names = NULL;
return -ENOMEM;
}
@@ -3669,37 +4025,33 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ int err = 0;
int i;
- int ret;
for (i = 0; i < dev->num_ports; i++) {
- struct mlx5_ib_port *port = &dev->port[i];
+ err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
+ if (err)
+ goto err_alloc;
+
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
+ dev->port[i].cnts.offsets);
- ret = mlx5_core_alloc_q_counter(dev->mdev,
- &port->cnts.set_id);
- if (ret) {
+ err = mlx5_core_alloc_q_counter(dev->mdev,
+ &dev->port[i].cnts.set_id);
+ if (err) {
mlx5_ib_warn(dev,
"couldn't allocate queue counter for port %d, err %d\n",
- i + 1, ret);
- goto dealloc_counters;
+ i + 1, err);
+ goto err_alloc;
}
-
- ret = __mlx5_ib_alloc_counters(dev, &port->cnts);
- if (ret)
- goto dealloc_counters;
-
- mlx5_ib_fill_counters(dev, port->cnts.names,
- port->cnts.offsets);
+ dev->port[i].cnts.set_id_valid = true;
}
return 0;
-dealloc_counters:
- while (--i >= 0)
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
-
- return ret;
+err_alloc:
+ mlx5_ib_dealloc_counters(dev);
+ return err;
}
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
@@ -3718,7 +4070,7 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
+static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct mlx5_ib_port *port,
struct rdma_hw_stats *stats)
{
@@ -3731,7 +4083,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
if (!out)
return -ENOMEM;
- ret = mlx5_core_query_q_counter(dev->mdev,
+ ret = mlx5_core_query_q_counter(mdev,
port->cnts.set_id, 0,
out, outlen);
if (ret)
@@ -3753,28 +4105,43 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_port *port = &dev->port[port_num - 1];
+ struct mlx5_core_dev *mdev;
int ret, num_counters;
+ u8 mdev_port_num;
if (!stats)
return -EINVAL;
- ret = mlx5_ib_query_q_counters(dev, port, stats);
+ num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+
+ /* q_counters are allocated per IB device; query the master mdev */
+ ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
if (ret)
return ret;
- num_counters = port->cnts.num_q_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
+ &mdev_port_num);
+ if (!mdev) {
+ /* If the port is not affiliated yet, it is in the down state,
+ * which has no counters yet, so the values would all be zero;
+ * no need to read from the HCA.
+ */
+ goto done;
+ }
ret = mlx5_lag_query_cong_counters(dev->mdev,
stats->value +
port->cnts.num_q_counters,
port->cnts.num_cong_counters,
port->cnts.offsets +
port->cnts.num_q_counters);
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
if (ret)
return ret;
- num_counters += port->cnts.num_cong_counters;
}
+done:
return num_counters;
}
@@ -3936,36 +4303,250 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
return mlx5_get_vector_affinity(dev->mdev, comp_vector);
}
-static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
{
- struct mlx5_ib_dev *dev;
- enum rdma_link_layer ll;
- int port_type_cap;
- const char *name;
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ struct mlx5_ib_port *port = &ibdev->port[port_num];
+ int comps;
int err;
int i;
- port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+ mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
- printk_once(KERN_INFO "%s", mlx5_version);
+ spin_lock(&port->mp.mpi_lock);
+ if (!mpi->ibdev) {
+ spin_unlock(&port->mp.mpi_lock);
+ return;
+ }
+ mpi->ibdev = NULL;
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
- if (!dev)
- return NULL;
+ spin_unlock(&port->mp.mpi_lock);
+ mlx5_remove_netdev_notifier(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
- dev->mdev = mdev;
+ comps = mpi->mdev_refcnt;
+ if (comps) {
+ mpi->unaffiliate = true;
+ init_completion(&mpi->unref_comp);
+ spin_unlock(&port->mp.mpi_lock);
+
+ for (i = 0; i < comps; i++)
+ wait_for_completion(&mpi->unref_comp);
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi->unaffiliate = false;
+ }
+
+ port->mp.mpi = NULL;
+
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+
+ spin_unlock(&port->mp.mpi_lock);
+
+ err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
+
+ mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
+ /* Only log the error; we still need to clean up the pointers
+ * and add the mpi back to the unaffiliated list.
+ */
+ if (err)
+ mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
+ port_num + 1);
+
+ ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+}
+
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ int err;
+
+ spin_lock(&ibdev->port[port_num].mp.mpi_lock);
+ if (ibdev->port[port_num].mp.mpi) {
+ mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
+ port_num + 1);
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+ return false;
+ }
+
+ ibdev->port[port_num].mp.mpi = mpi;
+ mpi->ibdev = ibdev;
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+
+ err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
+ if (err)
+ goto unbind;
+
+ err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
+ if (err)
+ goto unbind;
+
+ err = mlx5_add_netdev_notifier(ibdev, port_num);
+ if (err) {
+ mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
+ port_num + 1);
+ goto unbind;
+ }
+
+ err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
+ if (err)
+ goto unbind;
+
+ return true;
+
+unbind:
+ mlx5_ib_unbind_slave_port(ibdev, mpi);
+ return false;
+}
+
+static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ struct mlx5_ib_multiport_info *mpi;
+ int err;
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return 0;
+
+ err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
+ &dev->sys_image_guid);
+ if (err)
+ return err;
- dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ bool bound = false;
+
+ /* build a stub multiport info struct for the native port. */
+ if (i == port_num) {
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi) {
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ mlx5_nic_vport_disable_roce(dev->mdev);
+ return -ENOMEM;
+ }
+
+ mpi->is_master = true;
+ mpi->mdev = dev->mdev;
+ mpi->sys_image_guid = dev->sys_image_guid;
+ dev->port[i].mp.mpi = mpi;
+ mpi->ibdev = dev;
+ mpi = NULL;
+ continue;
+ }
+
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+ if (bound) {
+ dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+ mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
+ list_del(&mpi->list);
+ break;
+ }
+ }
+ if (!bound) {
+ get_port_caps(dev, i + 1);
+ mlx5_ib_dbg(dev, "no free port found for port %d\n",
+ i + 1);
+ }
+ }
+
+ list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return err;
+}
+
+static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ if (dev->port[i].mp.mpi) {
+ /* Destroy the native port stub */
+ if (i == port_num) {
+ kfree(dev->port[i].mp.mpi);
+ dev->port[i].mp.mpi = NULL;
+ } else {
+ mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
+ mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
+ }
+ }
+ }
+
+ mlx5_ib_dbg(dev, "removing from devlist\n");
+ list_del(&dev->ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
+
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_multiport_master(dev);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ cleanup_srcu_struct(&dev->mr_srcu);
+#endif
+ kfree(dev->port);
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ const char *name;
+ int err;
+ int i;
+
+ dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
GFP_KERNEL);
if (!dev->port)
- goto err_dealloc;
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ spin_lock_init(&dev->port[i].mp.mpi_lock);
+ rwlock_init(&dev->roce[i].netdev_lock);
+ }
- rwlock_init(&dev->roce.netdev_lock);
- err = get_port_caps(dev);
+ err = mlx5_ib_init_multiport_master(dev);
if (err)
goto err_free_port;
+ if (!mlx5_core_mp_enabled(mdev)) {
+ for (i = 1; i <= dev->num_ports; i++) {
+ err = get_port_caps(dev, i);
+ if (err)
+ break;
+ }
+ } else {
+ err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
+ }
+ if (err)
+ goto err_mp;
+
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
@@ -3978,12 +4559,37 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
- dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ mutex_init(&dev->flow_db.lock);
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ err = init_srcu_struct(&dev->mr_srcu);
+ if (err)
+ goto err_free_port;
+#endif
+
+ return 0;
+err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+
+err_free_port:
+ kfree(dev->port);
+
+ return err;
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -4022,8 +4628,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
- if (ll == IB_LINK_LAYER_ETHERNET)
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.query_gid = mlx5_ib_query_gid;
dev->ib_dev.add_gid = mlx5_ib_add_gid;
dev->ib_dev.del_gid = mlx5_ib_del_gid;
@@ -4080,8 +4684,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
- mlx5_ib_internal_fill_odp_caps(dev);
-
dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -4092,11 +4694,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
}
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
- dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
- }
-
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -4111,8 +4708,39 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
- if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ err = init_node_data(dev);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ mutex_init(&dev->lb_mutex);
+
+ return 0;
+}
+
+static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+ int err;
+ int i;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ for (i = 0; i < dev->num_ports; i++) {
+ dev->roce[i].dev = dev;
+ dev->roce[i].native_port_num = i + 1;
+ dev->roce[i].last_port_state = IB_PORT_DOWN;
+ }
+
+ dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.create_wq = mlx5_ib_create_wq;
dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
@@ -4124,143 +4752,329 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ err = mlx5_enable_eth(dev, port_num);
+ if (err)
+ return err;
}
- err = init_node_data(dev);
- if (err)
- goto err_free_port;
- mutex_init(&dev->flow_db.lock);
- mutex_init(&dev->cap_mask_mutex);
- INIT_LIST_HEAD(&dev->qp_list);
- spin_lock_init(&dev->reset_flow_resource_lock);
+ return 0;
+}
+
+static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_enable_eth(dev);
- if (err)
- goto err_free_port;
- dev->roce.last_port_state = IB_PORT_DOWN;
+ mlx5_disable_eth(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
}
+}
- err = create_dev_resources(&dev->devr);
- if (err)
- goto err_disable_eth;
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_dev_resources(&dev->devr);
+}
- err = mlx5_ib_odp_init_one(dev);
- if (err)
- goto err_rsrc;
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_dev_resources(&dev->devr);
+}
+static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_internal_fill_odp_caps(dev);
+
+ return mlx5_ib_odp_init_one(dev);
+}
+
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- err = mlx5_ib_alloc_counters(dev);
- if (err)
- goto err_odp;
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+
+ return mlx5_ib_alloc_counters(dev);
}
- err = mlx5_ib_init_cong_debugfs(dev);
- if (err)
- goto err_cnt;
+ return 0;
+}
+
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ mlx5_ib_dealloc_counters(dev);
+}
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ return mlx5_ib_init_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+{
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- if (IS_ERR(dev->mdev->priv.uar))
- goto err_cong;
+ if (!dev->mdev->priv.uar)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+ int err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
- goto err_uar_page;
+ return err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- goto err_bfreg;
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- err = ib_register_device(&dev->ib_dev, NULL);
- if (err)
- goto err_fp_bfreg;
+ return err;
+}
- err = create_umr_res(dev);
- if (err)
- goto err_dev;
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
+
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+ return ib_register_device(&dev->ib_dev, NULL);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ ib_unregister_device(&dev->ib_dev);
+}
+static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_umr_res(dev);
+}
+
+static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_umrc_res(dev);
+}
+
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
init_delay_drop(dev);
+ return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+ cancel_delay_drop(dev);
+}
+
+static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
+{
+ int err;
+ int i;
+
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
err = device_create_file(&dev->ib_dev.dev,
mlx5_class_attributes[i]);
if (err)
- goto err_delay_drop;
+ return err;
}
- if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
- MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
- mutex_init(&dev->lb_mutex);
+ return 0;
+}
+
+static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
+{
+ /* Number of stages to clean up */
+ while (stage) {
+ stage--;
+ if (profile->stage[stage].cleanup)
+ profile->stage[stage].cleanup(dev);
+ }
+
+ ib_dealloc_device((struct ib_device *)dev);
+}
+
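/*
 * [Editorial sketch, not part of this hunk.] The profile machinery used
 * below is declared in mlx5_ib.h elsewhere in this series; a minimal
 * sketch of its expected shape (the MLX5_IB_STAGE_* constants form an
 * enum ending in MLX5_IB_STAGE_MAX):
 */
struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = { .init = _init, .cleanup = _cleanup }

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};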
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
+
+static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
+ const struct mlx5_ib_profile *profile)
+{
+ struct mlx5_ib_dev *dev;
+ int err;
+ int i;
+
+ printk_once(KERN_INFO "%s", mlx5_version);
+ dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ dev->mdev = mdev;
+ dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
+
+ for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+ if (profile->stage[i].init) {
+ err = profile->stage[i].init(dev);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ dev->profile = profile;
dev->ib_active = true;
return dev;
-err_delay_drop:
- cancel_delay_drop(dev);
- destroy_umrc_res(dev);
+err_out:
+ __mlx5_ib_remove(dev, profile, i);
-err_dev:
- ib_unregister_device(&dev->ib_dev);
+ return NULL;
+}
-err_fp_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+static const struct mlx5_ib_profile pf_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_stage_roce_init,
+ mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_stage_dev_res_init,
+ mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ mlx5_ib_stage_odp_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_stage_counters_init,
+ mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+ mlx5_ib_stage_umr_res_init,
+ mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ mlx5_ib_stage_class_attr_init,
+ NULL),
+};
-err_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
+{
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
+ bool bound = false;
+ int err;
-err_uar_page:
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi)
+ return NULL;
-err_cong:
- mlx5_ib_cleanup_cong_debugfs(dev);
-err_cnt:
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
+ mpi->mdev = mdev;
-err_odp:
- mlx5_ib_odp_remove_one(dev);
+ err = mlx5_query_nic_vport_system_image_guid(mdev,
+ &mpi->sys_image_guid);
+ if (err) {
+ kfree(mpi);
+ return NULL;
+ }
-err_rsrc:
- destroy_dev_resources(&dev->devr);
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid)
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
-err_disable_eth:
- if (ll == IB_LINK_LAYER_ETHERNET) {
- mlx5_disable_eth(dev);
- mlx5_remove_netdev_notifier(dev);
+ if (bound) {
+ rdma_roce_rescan_device(&dev->ib_dev);
+ break;
+ }
}
-err_free_port:
- kfree(dev->port);
+ if (!bound) {
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+ dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ } else {
+ mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
+ }
+ mutex_unlock(&mlx5_ib_multiport_mutex);
-err_dealloc:
- ib_dealloc_device((struct ib_device *)dev);
+ return mpi;
+}
- return NULL;
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+{
+ enum rdma_link_layer ll;
+ int port_type_cap;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
+ u8 port_num = mlx5_core_native_port_num(mdev) - 1;
+
+ return mlx5_ib_add_slave_port(mdev, port_num);
+ }
+
+ return __mlx5_ib_add(mdev, &pf_profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
- struct mlx5_ib_dev *dev = context;
- enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
- cancel_delay_drop(dev);
- mlx5_remove_netdev_notifier(dev);
- ib_unregister_device(&dev->ib_dev);
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
- mlx5_ib_cleanup_cong_debugfs(dev);
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
- destroy_umrc_res(dev);
- mlx5_ib_odp_remove_one(dev);
- destroy_dev_resources(&dev->devr);
- if (ll == IB_LINK_LAYER_ETHERNET)
- mlx5_disable_eth(dev);
- kfree(dev->port);
- ib_dealloc_device(&dev->ib_dev);
+ if (mlx5_core_is_mp_slave(mdev)) {
+ mpi = context;
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ if (mpi->ibdev)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return;
+ }
+
+ dev = context;
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
static struct mlx5_interface mlx5_ib_interface = {
@@ -4277,6 +5091,10 @@ static int __init mlx5_ib_init(void)
{
int err;
+ mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
+ if (!mlx5_ib_event_wq)
+ return -ENOMEM;
+
mlx5_ib_odp_init();
err = mlx5_register_interface(&mlx5_ib_interface);
@@ -4287,6 +5105,7 @@ static int __init mlx5_ib_init(void)
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
+ destroy_workqueue(mlx5_ib_event_wq);
}
module_init(mlx5_ib_init);
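
The hunks above replace the driver's single monolithic error-unwind ladder with a table-driven design: __mlx5_ib_add() walks profile->stage[] forward, and on any failure __mlx5_ib_remove() unwinds only the stages that completed, in reverse order. A minimal userspace sketch of the same pattern, with two toy stages invented for the example (none of these names come from the driver):

	#include <stdio.h>

	struct stage {
		int  (*init)(void);
		void (*cleanup)(void);	/* may be NULL for stateless stages */
	};

	static int  a_init(void) { puts("A init");    return 0; }
	static void a_fini(void) { puts("A cleanup"); }
	static int  b_init(void) { puts("B init");    return -1; /* simulated failure */ }

	static const struct stage profile[] = {
		{ a_init, a_fini },
		{ b_init, NULL },
	};

	#define NSTAGES ((int)(sizeof(profile) / sizeof(profile[0])))

	/* Unwind the first 'stage' entries in reverse order, like __mlx5_ib_remove(). */
	static void remove_stages(int stage)
	{
		while (stage--)
			if (profile[stage].cleanup)
				profile[stage].cleanup();
	}

	int main(void)
	{
		int i;

		for (i = 0; i < NSTAGES; i++) {
			if (profile[i].init && profile[i].init()) {
				remove_stages(i);	/* stage i never completed */
				return 1;
			}
		}
		remove_stages(NSTAGES);
		return 0;
	}

Because the unwind count is the loop index at the point of failure, a stage that fails is never asked to clean itself up, which is the same invariant the driver relies on.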
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2c5f3533bbc9..139385129973 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -70,15 +70,6 @@ enum {
MLX5_IB_MMAP_CMD_MASK = 0xff,
};
-enum mlx5_ib_mmap_cmd {
- MLX5_IB_MMAP_REGULAR_PAGE = 0,
- MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
- MLX5_IB_MMAP_WC_PAGE = 2,
- MLX5_IB_MMAP_NC_PAGE = 3,
- /* 5 is chosen in order to be compatible with old versions of libmlx5 */
- MLX5_IB_MMAP_CORE_CLOCK = 5,
-};
-
enum {
MLX5_RES_SCAT_DATA32_CQE = 0x1,
MLX5_RES_SCAT_DATA64_CQE = 0x2,
@@ -112,6 +103,11 @@ enum {
MLX5_TM_MAX_SGE = 1,
};
+enum {
+ MLX5_IB_INVALID_UAR_INDEX = BIT(31),
+ MLX5_IB_INVALID_BFREG = BIT(31),
+};
+
struct mlx5_ib_vma_private_data {
struct list_head list;
struct vm_area_struct *vma;
@@ -200,6 +196,8 @@ struct mlx5_ib_flow_db {
* creates the actual hardware QP.
*/
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
+#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
+#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
#define MLX5_IB_UMR_OCTOWORD 16
@@ -360,12 +358,18 @@ struct mlx5_bf {
struct mlx5_sq_bfreg *bfreg;
};
+struct mlx5_ib_dct {
+ struct mlx5_core_dct mdct;
+ u32 *in;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
union {
struct mlx5_ib_qp_trans trans_qp;
struct mlx5_ib_raw_packet_qp raw_packet_qp;
struct mlx5_ib_rss_qp rss_qp;
+ struct mlx5_ib_dct dct;
};
struct mlx5_buf buf;
@@ -404,6 +408,8 @@ struct mlx5_ib_qp {
u32 rate_limit;
u32 underlay_qpn;
bool tunnel_offload_en;
+ /* storage for qp sub type when core qp type is IB_QPT_DRIVER */
+ enum ib_qp_type qp_sub_type;
};
struct mlx5_ib_cq_buf {
@@ -636,10 +642,21 @@ struct mlx5_ib_counters {
u32 num_q_counters;
u32 num_cong_counters;
u16 set_id;
+ bool set_id_valid;
+};
+
+struct mlx5_ib_multiport_info;
+
+struct mlx5_ib_multiport {
+ struct mlx5_ib_multiport_info *mpi;
+ /* To be held when accessing the multiport info */
+ spinlock_t mpi_lock;
};
struct mlx5_ib_port {
struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};
struct mlx5_roce {
@@ -651,12 +668,15 @@ struct mlx5_roce {
struct notifier_block nb;
atomic_t next_port;
enum ib_port_state last_port_state;
+ struct mlx5_ib_dev *dev;
+ u8 native_port_num;
};
struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct dentry *dentry;
+ u8 port_num;
};
enum mlx5_ib_dbg_cc_types {
@@ -709,10 +729,50 @@ struct mlx5_ib_delay_drop {
struct mlx5_ib_dbg_delay_drop *dbg;
};
+enum mlx5_ib_stages {
+ MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_ODP,
+ MLX5_IB_STAGE_COUNTERS,
+ MLX5_IB_STAGE_CONG_DEBUGFS,
+ MLX5_IB_STAGE_UAR,
+ MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_IB_REG,
+ MLX5_IB_STAGE_UMR_RESOURCES,
+ MLX5_IB_STAGE_DELAY_DROP,
+ MLX5_IB_STAGE_CLASS_ATTR,
+ MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+ int (*init)(struct mlx5_ib_dev *dev);
+ void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+ .stage[_stage] = {.init = _init, .cleanup = _cleanup}
+
+struct mlx5_ib_profile {
+ struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
+
+struct mlx5_ib_multiport_info {
+ struct list_head list;
+ struct mlx5_ib_dev *ibdev;
+ struct mlx5_core_dev *mdev;
+ struct completion unref_comp;
+ u64 sys_image_guid;
+ u32 mdev_refcnt;
+ bool is_master;
+ bool unaffiliate;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
- struct mlx5_roce roce;
+ struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
/* serialize update of capability mask
*/
@@ -746,12 +806,14 @@ struct mlx5_ib_dev {
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
- struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ const struct mlx5_ib_profile *profile;
/* protect the user_td */
struct mutex lb_mutex;
u32 user_td;
u8 umr_fence;
+ struct list_head ib_dev_list;
+ u64 sys_image_guid;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -956,13 +1018,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
@@ -977,7 +1040,6 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
-static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
@@ -1001,8 +1063,8 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
int index, enum ib_gid_type *gid_type);
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
@@ -1021,6 +1083,15 @@ void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
+ int bfregn);
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 ib_port_num,
+ u8 *native_port_num);
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 port_num);
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -1052,8 +1123,8 @@ static inline u32 check_cq_create_flags(u32 flags)
* It returns non-zero value for unsupported CQ
* create flags, otherwise it returns zero.
*/
- return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
@@ -1113,10 +1184,10 @@ static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_suppor
MLX5_UARS_IN_PAGE : 1;
}
-static inline int get_num_uars(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi)
+static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
#endif /* MLX5_IB_H */
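
STAGE_CREATE() above is a C99 designated-initializer macro: each profile entry is keyed by its enum value rather than by position, and any stage that is never named is zero-filled, which is how NULL init/cleanup hooks arise without explicit padding. A standalone illustration; the kernel nests the array inside struct mlx5_ib_profile, which this sketch flattens, and the stage names here are invented:

	#include <stdio.h>

	enum { STAGE_FIRST, STAGE_SECOND, STAGE_MAX };

	struct stage {
		int  (*init)(void);
		void (*cleanup)(void);
	};

	#define STAGE_CREATE(_stage, _init, _cleanup) \
		[_stage] = { .init = _init, .cleanup = _cleanup }

	static int second_init(void) { return 0; }

	static const struct stage profile[STAGE_MAX] = {
		STAGE_CREATE(STAGE_SECOND, second_init, NULL),
		/* STAGE_FIRST is never named, so it is zero-initialized */
	};

	int main(void)
	{
		printf("first  has init: %s\n", profile[STAGE_FIRST].init  ? "yes" : "no");
		printf("second has init: %s\n", profile[STAGE_SECOND].init ? "yes" : "no");
		return 0;
	}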
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d109fe8290a7..556e015678de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1206,6 +1206,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
bool use_umr = true;
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EINVAL);
+
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
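
The mr.c hunk gates user MR registration on IS_ENABLED(CONFIG_INFINIBAND_USER_MEM), which evaluates to a compile-time 0 or 1 so the compiler discards the dead branch entirely. The mechanism, simplified from include/linux/kconfig.h (the real macro also accepts the =m form through an extra __or() layer), works by defining an enabled option to 1 and letting token pasting inject an extra macro argument:

	#include <stdio.h>

	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
	#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
	#define __is_defined(x) ___is_defined(x)	/* extra level expands x first */
	#define IS_ENABLED(option) __is_defined(option)

	#define CONFIG_FOO 1		/* pretend "FOO" was switched on */
	/* CONFIG_BAR is deliberately left undefined */

	int main(void)
	{
		printf("FOO=%d BAR=%d\n", IS_ENABLED(CONFIG_FOO), IS_ENABLED(CONFIG_BAR));
		return 0;
	}

For a defined option, pasting yields __ARG_PLACEHOLDER_1, which expands to "0," and shifts the literal 1 into the second-argument slot; an undefined option pastes into a junk token and the 0 survives instead.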
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e2197bdda89c..f1a87a690a4c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1207,10 +1207,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret;
- ret = init_srcu_struct(&dev->mr_srcu);
- if (ret)
- return ret;
-
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
if (ret) {
@@ -1222,11 +1218,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
return 0;
}
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *dev)
-{
- cleanup_srcu_struct(&dev->mr_srcu);
-}
-
int mlx5_ib_odp_init(void)
{
mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cffe5966aef9..39d24bf694a8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -493,7 +493,7 @@ enum {
static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
- return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
+ return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}
static int num_med_bfreg(struct mlx5_ib_dev *dev,
@@ -581,7 +581,7 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
return bfregn;
}
-static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
mutex_lock(&bfregi->lock);
bfregi->count[bfregn]--;
@@ -613,6 +613,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
case IB_QPT_RAW_PACKET:
case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
@@ -627,7 +628,8 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi, int bfregn)
+ struct mlx5_bfreg_info *bfregi, int bfregn,
+ bool dyn_bfreg)
{
int bfregs_per_sys_page;
int index_of_sys_page;
@@ -637,8 +639,16 @@ static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
- offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+ if (dyn_bfreg) {
+ index_of_sys_page += bfregi->num_static_sys_pages;
+ if (bfregn > bfregi->num_dyn_bfregs ||
+ bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
+ mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
+ return -EINVAL;
+ }
+ }
+ offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
return bfregi->sys_pages[index_of_sys_page] + offset;
}
@@ -764,7 +774,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
- int uar_index;
+ int uar_index = 0;
int npages;
u32 offset = 0;
int bfregn;
@@ -780,12 +790,20 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
context = to_mucontext(pd->uobject->context);
- /*
- * TBD: should come from the verbs when we have the API
- */
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi,
+ ucmd.bfreg_index, true);
+ if (uar_index < 0)
+ return uar_index;
+
+ bfregn = MLX5_IB_INVALID_BFREG;
+ } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
+ /*
+ * TBD: should come from the verbs when we have the API
+ */
/* In CROSS_CHANNEL CQ and QP must use the same UAR */
bfregn = MLX5_CROSS_CHANNEL_BFREG;
-	else {
+	} else {
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
if (bfregn < 0) {
@@ -804,8 +822,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
}
- uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
+ false);
qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -845,7 +865,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, page_offset, offset);
MLX5_SET(qpc, qpc, uar_page, uar_index);
- resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ else
+ resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
@@ -874,7 +897,8 @@ err_umem:
ib_umem_release(ubuffer->umem);
err_bfreg:
- free_bfreg(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
return err;
}
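
In the hunks above, bfregn_to_uar_index() learns to translate dynamically allocated bfregs: their system pages are appended after the num_static_sys_pages reserved at context allocation, so the page index is shifted before the sys_pages[] lookup. A toy recomputation of the mapping with made-up geometry of 4 non-FP bfregs per UAR and 2 UARs per system page; the kernel derives both from lib_uar_4k and additionally bounds-checks dynamic indices, which this sketch omits:

	#include <stdio.h>

	#define BFREGS_PER_UAR		4	/* stand-in for MLX5_NON_FP_BFREGS_PER_UAR */
	#define UARS_PER_SYS_PAGE	2
	#define BFREGS_PER_SYS_PAGE	(BFREGS_PER_UAR * UARS_PER_SYS_PAGE)

	/* per-context table of hardware UAR indices; static pages come first */
	static const int sys_pages[] = { 100, 102, 104, 106 };
	static const int num_static_sys_pages = 2;

	static int bfregn_to_uar_index(int bfregn, int dyn_bfreg)
	{
		int index_of_sys_page = bfregn / BFREGS_PER_SYS_PAGE;
		int offset = bfregn % BFREGS_PER_SYS_PAGE / BFREGS_PER_UAR;

		if (dyn_bfreg)	/* dynamic pages sit after the static ones */
			index_of_sys_page += num_static_sys_pages;
		return sys_pages[index_of_sys_page] + offset;
	}

	int main(void)
	{
		printf("static  bfreg 5 -> uar %d\n", bfregn_to_uar_index(5, 0));
		printf("dynamic bfreg 5 -> uar %d\n", bfregn_to_uar_index(5, 1));
		return 0;
	}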
@@ -887,7 +911,13 @@ static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
- free_bfreg(dev, &context->bfregi, qp->bfregn);
+
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+	 * BFREGs of UARs allocated dynamically are handled by userspace.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
@@ -1015,6 +1045,7 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
+ (attr->qp_type == MLX5_IB_QPT_DCI) ||
(attr->qp_type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
@@ -2086,20 +2117,108 @@ static const char *ib_qp_type_str(enum ib_qp_type type)
return "IB_QPT_RAW_PACKET";
case MLX5_IB_QPT_REG_UMR:
return "MLX5_IB_QPT_REG_UMR";
+ case IB_QPT_DRIVER:
+ return "IB_QPT_DRIVER";
case IB_QPT_MAX:
default:
return "Invalid QP type";
}
}
+static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct mlx5_ib_create_qp *ucmd)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ int err = 0;
+ u32 uidx = MLX5_IB_DEFAULT_UIDX;
+ void *dctc;
+
+ if (!attr->srq || !attr->recv_cq)
+ return ERR_PTR(-EINVAL);
+
+ dev = to_mdev(pd->device);
+
+ err = get_qp_user_index(to_mucontext(pd->uobject->context),
+ ucmd, sizeof(*ucmd), &uidx);
+ if (err)
+ return ERR_PTR(err);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ qp->qp_sub_type = MLX5_IB_QPT_DCT;
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+
+ qp->state = IB_QPS_RESET;
+
+ return &qp->ibqp;
+err_free:
+ kfree(qp);
+ return ERR_PTR(err);
+}
+
+static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_create_qp *ucmd,
+ struct ib_udata *udata)
+{
+ enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
+ int err;
+
+ if (!udata)
+ return -EINVAL;
+
+ if (udata->inlen < sizeof(*ucmd)) {
+ mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
+ return -EINVAL;
+ }
+ err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
+ if (err)
+ return err;
+
+	if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
+		init_attr->qp_type = MLX5_IB_QPT_DCI;
+	} else if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
+		init_attr->qp_type = MLX5_IB_QPT_DCT;
+	} else {
+		mlx5_ib_dbg(dev, "Invalid QP flags\n");
+		return -EINVAL;
+	}
+
+ if (!MLX5_CAP_GEN(dev->mdev, dct)) {
+ mlx5_ib_dbg(dev, "DC transport is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
+ struct ib_qp_init_attr *verbs_init_attr,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev;
struct mlx5_ib_qp *qp;
u16 xrcdn = 0;
int err;
+ struct ib_qp_init_attr mlx_init_attr;
+ struct ib_qp_init_attr *init_attr = verbs_init_attr;
if (pd) {
dev = to_mdev(pd->device);
@@ -2124,6 +2243,26 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
+ if (init_attr->qp_type == IB_QPT_DRIVER) {
+ struct mlx5_ib_create_qp ucmd;
+
+ init_attr = &mlx_init_attr;
+ memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
+ err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
+ if (err)
+ return ERR_PTR(err);
+
+ if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
+ if (init_attr->cap.max_recv_wr ||
+ init_attr->cap.max_recv_sge) {
+ mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ return mlx5_ib_create_dct(pd, init_attr, &ucmd);
+ }
+ }
+
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
@@ -2145,6 +2284,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
case MLX5_IB_QPT_REG_UMR:
+ case MLX5_IB_QPT_DCI:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -2185,9 +2325,31 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
+ if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
+ qp->qp_sub_type = init_attr->qp_type;
+
return &qp->ibqp;
}
+static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+
+ if (mqp->state == IB_QPS_RTR) {
+ int err;
+
+ err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
+ return err;
+ }
+ }
+
+ kfree(mqp->dct.in);
+ kfree(mqp);
+ return 0;
+}
+
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -2196,6 +2358,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
+ if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_destroy_dct(mqp);
+
destroy_qp_common(dev, mqp);
kfree(mqp);
@@ -2763,7 +2928,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!context)
return -ENOMEM;
- err = to_mlx5_st(ibqp->qp_type);
+ err = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
goto out;
@@ -2796,8 +2962,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
+ u8 p = mlx5_core_native_port_num(dev->mdev);
tx_affinity = (unsigned int)atomic_add_return(1,
- &dev->roce.next_port) %
+ &dev->roce[p].next_port) %
MLX5_MAX_PORTS + 1;
context->flags |= cpu_to_be32(tx_affinity << 24);
}
@@ -2922,7 +3089,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
- mlx5_st = to_mlx5_st(ibqp->qp_type);
+ mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (mlx5_st < 0)
goto out;
@@ -2994,6 +3162,139 @@ out:
return err;
}
+static inline bool is_valid_mask(int mask, int req, int opt)
+{
+ if ((mask & req) != req)
+ return false;
+
+ if (mask & ~(req | opt))
+ return false;
+
+ return true;
+}
+
+/*
+ * Check for a valid state transition of driver QP types.
+ * For now, the only QP type that this function supports is DCI.
+ */
+static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
+ enum ib_qp_attr_mask attr_mask)
+{
+ int req = IB_QP_STATE;
+ int opt = 0;
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ req |= IB_QP_PATH_MTU;
+ opt = IB_QP_PKEY_INDEX;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
+ return is_valid_mask(attr_mask, req, opt);
+ }
+ return false;
+}
+
+/*
+ * mlx5_ib_modify_dct: modify a DCT QP
+ * Valid transitions are:
+ * RESET to INIT: must set access_flags, pkey_index and port
+ * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
+ * mtu, gid_index and hop_limit
+ * Other transitions and attributes are illegal
+ */
+static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ enum ib_qp_state cur_state, new_state;
+ int err = 0;
+ int required = IB_QP_STATE;
+ void *dctc;
+
+ if (!(attr_mask & IB_QP_STATE))
+ return -EINVAL;
+
+ cur_state = qp->state;
+ new_state = attr->qp_state;
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+
+ if (attr->port_num == 0 ||
+		    attr->port_num > dev->num_ports) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
+ return -EINVAL;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ MLX5_SET(dctc, dctc, rre, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ MLX5_SET(dctc, dctc, rwe, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ if (!mlx5_ib_dc_atomic_is_supported(dev))
+ return -EOPNOTSUPP;
+ MLX5_SET(dctc, dctc, rae, 1);
+ MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
+ }
+ MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
+ MLX5_SET(dctc, dctc, port, attr->port_num);
+ MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
+
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ struct mlx5_ib_modify_qp_resp resp = {};
+ u32 min_resp_len = offsetof(typeof(resp), dctn) +
+ sizeof(resp.dctn);
+
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+ resp.response_length = min_resp_len;
+
+ required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+ MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
+ MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
+ MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+
+ err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in));
+ if (err)
+ return err;
+ resp.dctn = qp->dct.mdct.mqp.qpn;
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+ return err;
+ }
+ } else {
+ mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
+ return -EINVAL;
+ }
+ if (err)
+ qp->state = IB_QPS_ERR;
+ else
+ qp->state = new_state;
+ return err;
+}
+
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -3011,8 +3312,14 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
- IB_QPT_GSI : ibqp->qp_type;
+ if (ibqp->qp_type == IB_QPT_DRIVER)
+ qp_type = qp->qp_sub_type;
+ else
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
+ IB_QPT_GSI : ibqp->qp_type;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
mutex_lock(&qp->mutex);
@@ -3031,15 +3338,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ qp_type != MLX5_IB_QPT_DCI &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
+ } else if (qp_type == MLX5_IB_QPT_DCI &&
+ !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp_type, attr_mask);
+ goto out;
}
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
+ attr->port_num > dev->num_ports)) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
goto out;
@@ -4358,11 +4671,10 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
struct rdma_ah_attr *ah_attr,
struct mlx5_qp_path *path)
{
- struct mlx5_core_dev *dev = ibdev->mdev;
memset(ah_attr, 0, sizeof(*ah_attr));
- if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
+ if (!path->port || path->port > ibdev->num_ports)
return;
ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
@@ -4577,6 +4889,71 @@ out:
return err;
}
+static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_core_dct *dct = &mqp->dct.mdct;
+ u32 *out;
+ u32 access_flags = 0;
+ int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
+ void *dctc;
+ int err;
+ int supported_mask = IB_QP_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_PKEY_INDEX;
+
+ if (qp_attr_mask & ~supported_mask)
+ return -EINVAL;
+ if (mqp->state != IB_QPS_RTR)
+ return -EINVAL;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+ if (err)
+ goto out;
+
+ dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
+
+ if (qp_attr_mask & IB_QP_STATE)
+ qp_attr->qp_state = IB_QPS_RTR;
+
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (MLX5_GET(dctc, dctc, rre))
+ access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(dctc, dctc, rwe))
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(dctc, dctc, rae))
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ qp_attr->qp_access_flags = access_flags;
+ }
+
+ if (qp_attr_mask & IB_QP_PORT)
+ qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ if (qp_attr_mask & IB_QP_AV) {
+ qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
+ qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
+ qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
+ qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
+ }
+ if (qp_attr_mask & IB_QP_PATH_MTU)
+ qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
+out:
+ kfree(out);
+ return err;
+}
+
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
@@ -4596,6 +4973,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
+ if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
+ return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
+ qp_attr_mask, qp_init_attr);
+
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
@@ -4685,13 +5066,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
int err;
err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
- return err;
- }
kfree(xrcd);
-
return 0;
}
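
The modify-path hunks above validate attr_mask with is_valid_mask(): every required bit must be present, and nothing outside the union of required and optional bits may be set. The same predicate, restated standalone with invented attribute bits and a few self-checks:

	#include <assert.h>
	#include <stdbool.h>

	#define ATTR_STATE	(1 << 0)
	#define ATTR_PORT	(1 << 1)
	#define ATTR_TIMEOUT	(1 << 2)

	static bool is_valid_mask(int mask, int req, int opt)
	{
		if ((mask & req) != req)	/* a required attribute is missing */
			return false;
		if (mask & ~(req | opt))	/* an attribute outside req|opt is set */
			return false;
		return true;
	}

	int main(void)
	{
		int req = ATTR_STATE | ATTR_PORT, opt = ATTR_TIMEOUT;

		assert(is_valid_mask(ATTR_STATE | ATTR_PORT, req, opt));
		assert(is_valid_mask(ATTR_STATE | ATTR_PORT | ATTR_TIMEOUT, req, opt));
		assert(!is_valid_mask(ATTR_STATE, req, opt));		/* PORT missing */
		assert(!is_valid_mask(ATTR_STATE | ATTR_PORT | (1 << 5), req, opt));
		return 0;
	}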
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index c6fe89d79248..2fe503e86c1d 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
+ ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
if (ret < 0)
goto out;
@@ -623,13 +623,12 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
+ &page->mapping, GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
}
- memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
mthca_uarc_virt(dev, &dev->driver_uar, i));
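
This hunk, and the ocrdma and qedr hunks below, are one mechanical conversion: dma_alloc_coherent() followed by memset(..., 0, ...) becomes a single dma_zalloc_coherent() call, removing the window in which the buffer exists unzeroed. The shape of the refactor, shown with plain C allocation since the real API needs a struct device; calloc() stands in for the zeroing allocator:

	#include <stdlib.h>
	#include <string.h>

	struct ring { unsigned char *va; size_t size; };

	/* before: allocate, then zero by hand */
	static int ring_init_old(struct ring *r, size_t size)
	{
		r->va = malloc(size);
		if (!r->va)
			return -1;
		memset(r->va, 0, size);
		r->size = size;
		return 0;
	}

	/* after: one call that returns zeroed memory */
	static int ring_init_new(struct ring *r, size_t size)
	{
		r->va = calloc(1, size);	/* analogous to dma_zalloc_coherent() */
		if (!r->va)
			return -1;
		r->size = size;
		return 0;
	}

	int main(void)
	{
		struct ring a = { 0 }, b = { 0 };
		int ret = 0;

		if (ring_init_old(&a, 64) || ring_init_new(&b, 64))
			ret = 1;
		free(a.va);
		free(b.va);
		return ret;
	}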
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
deleted file mode 100644
index 5fe56e810739..000000000000
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MTHCA_USER_H
-#define MTHCA_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define MTHCA_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mthca_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 uarc_size;
-};
-
-struct mthca_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct mthca_reg_mr {
-/*
- * Mark the memory region with a DMA attribute that causes
- * in-flight DMA to be flushed when the region is written to:
- */
-#define MTHCA_MR_DMASYNC 0x1
- __u32 mr_attrs;
- __u32 reserved;
-};
-
-struct mthca_create_cq {
- __u32 lkey;
- __u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
- __u32 arm_db_index;
- __u32 set_db_index;
-};
-
-struct mthca_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mthca_resize_cq {
- __u32 lkey;
- __u32 reserved;
-};
-
-struct mthca_create_srq {
- __u32 lkey;
- __u32 db_index;
- __u64 db_page;
-};
-
-struct mthca_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mthca_create_qp {
- __u32 lkey;
- __u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
- __u32 sq_db_index;
- __u32 rq_db_index;
-};
-
-#endif /* MTHCA_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c56ca2a74df5..6cdfbf8c5674 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1365,7 +1365,7 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
if (cm_node->accept_pend) {
BUG_ON(!cm_node->listener);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index d827d03e3941..b9cc02b4e8d5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -279,7 +279,6 @@ struct nes_cm_tcp_context {
u8 rcv_wscale;
struct nes_cm_tsa_context tsa_cntxt;
- struct timeval sent_ts;
};
@@ -341,7 +340,7 @@ struct nes_cm_node {
u16 mpa_frame_size;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct nes_cm_listener *listener;
enum nes_cm_conn_type conn_type;
struct nes_vnic *nesvnic;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0ba695a88b62..9904918589a4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,11 +380,10 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
+ &q->dma, GFP_KERNEL);
if (!q->va)
return -ENOMEM;
- memset(q->va, 0, q->size);
return 0;
}
@@ -1819,12 +1818,11 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
}
- memset(cq->va, 0, cq->len);
page_size = cq->len / hw_pages;
cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
@@ -2212,10 +2210,9 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
- memset(qp->sq.va, 0, len);
qp->sq.len = len;
qp->sq.pa = pa;
qp->sq.entry_size = dev->attr.wqe_size;
@@ -2263,10 +2260,9 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
- memset(qp->rq.va, 0, len);
qp->rq.pa = pa;
qp->rq.len = len;
qp->rq.entry_size = dev->attr.rqe_size;
@@ -2320,11 +2316,10 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
- &pa, GFP_KERNEL);
+ qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
- memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
for (; i < ird_q_len / dev->attr.rqe_size; i++) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index e528d7acb7f6..24d20a4aa262 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,15 +73,13 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
}
- memset(mem->va, 0, mem->size);
-
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
if (!mem->debugfs_mem)
@@ -834,7 +832,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+ if (!debugfs_create_file("reset_stats", 0200, dev->dir,
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7866fd8051f6..8009bdad4e5b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -461,7 +461,7 @@ retry:
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
struct ocrdma_pd *pd)
{
- return (uctx->cntxt_pd == pd ? true : false);
+ return (uctx->cntxt_pd == pd);
}
static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
@@ -550,13 +550,12 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
- memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
memset(&resp, 0, sizeof(resp));
@@ -885,13 +884,12 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
break;
}
- memset(va, 0, dma_len);
mr->pbl_table[i].va = va;
mr->pbl_table[i].pa = pa;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b26aa88dab48..53f00dbf313f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -604,12 +604,11 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < pbl_info->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
- &pa, flags);
+ va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
if (!va)
goto err;
- memset(va, 0, pbl_info->pbl_size);
pbl_table[i].va = va;
pbl_table[i].pa = pa;
}
@@ -3040,7 +3039,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swqe->wqe_size = 2;
swqe2 = qed_chain_produce(&qp->sq.pbl);
- swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
wr, bad_wr);
swqe->length = cpu_to_le32(length);
@@ -3471,9 +3470,9 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
if (qp->state != QED_ROCE_QP_STATE_ERR)
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 1);
break;
@@ -3591,7 +3590,7 @@ static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
wc->byte_len = le32_to_cpu(resp->length);
if (resp->flags & QEDR_RESP_IMM) {
- wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
wc->wc_flags |= IB_WC_WITH_IMM;
if (resp->flags & QEDR_RESP_RDMA)
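
The qedr changes fix immediate-data endianness: the hardware descriptor field is little-endian, while ib_wc.ex.imm_data is declared __be32, so a bare le32_to_cpu() left a CPU-order value in a big-endian slot. The corrected pattern converts twice, wire-LE to CPU order and then CPU order to BE (mirrored on the send side). A host-independent sketch with hand-rolled helpers; the trailing-underscore names are mine, not kernel API:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint32_t bswap32(uint32_t x)
	{
		return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
		       ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
	}

	static int little_endian(void)
	{
		const uint16_t probe = 1;

		return *(const uint8_t *)&probe == 1;
	}

	static uint32_t le32_to_cpu_(uint32_t v) { return little_endian() ? v : bswap32(v); }
	static uint32_t cpu_to_be32_(uint32_t v) { return little_endian() ? bswap32(v) : v; }

	int main(void)
	{
		const uint8_t wire[4] = { 0x78, 0x56, 0x34, 0x12 }; /* LE encoding of 0x12345678 */
		uint32_t raw, good;
		const uint8_t *p;

		memcpy(&raw, wire, 4);			/* what the driver reads from the CQE */
		good = cpu_to_be32_(le32_to_cpu_(raw));	/* normalize, then encode big-endian */
		p = (const uint8_t *)&good;
		printf("cpu value 0x%08x, be bytes %02x %02x %02x %02x\n",
		       le32_to_cpu_(raw), p[0], p[1], p[2], p[3]);
		return 0;
	}

On any host this prints the CPU value 0x12345678 and the in-memory bytes 12 34 56 78, i.e. the big-endian layout consumers of a __be32 field expect.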
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 092ed8103842..0235f76bbc72 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1428,8 +1428,6 @@ u64 qib_sps_ints(void);
*/
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
-const char *qib_get_unit_name(int unit);
-const char *qib_get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -1488,15 +1486,15 @@ extern struct mutex qib_mutex;
#define qib_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
##__VA_ARGS__)
#define qib_devinfo(pcidev, fmt, ...) \
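
The qib.h hunk repoints the qib_dev_err()/qib_dev_warn() wrappers at rvt_get_ibdev_name() now that qib_get_unit_name() is gone; the wrappers themselves are the usual variadic-macro idiom, where ", ##__VA_ARGS__" (a gcc/clang extension) swallows the comma when the caller passes no extra arguments. A userspace equivalent:

	#include <stdio.h>

	#define dev_errf(name, fmt, ...) \
		fprintf(stderr, "%s: " fmt, name, ##__VA_ARGS__)

	int main(void)
	{
		dev_errf("qib0", "link down\n");	/* no extra args: ## drops the comma */
		dev_errf("qib0", "bad pkey index %d\n", 3);
		return 0;
	}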
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 33d3335385e8..3117cc5f2a9a 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -81,22 +81,6 @@ MODULE_DESCRIPTION("Intel IB driver");
struct qlogic_ib_stats qib_stats;
-const char *qib_get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), "infinipath%u", unit);
- return iname;
-}
-
-const char *qib_get_card_name(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
- return qib_get_unit_name(dd->unit);
-}
-
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 33a2e74c8495..5838b3bf34b9 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -163,8 +163,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
if (bguid[6] == 0xff) {
if (bguid[5] == 0xff) {
qib_dev_err(dd,
- "Can't set %s GUID from base, wraps to OUI!\n",
- qib_get_unit_name(t));
+ "Can't set GUID from base, wraps to OUI!\n");
dd->base_guid = 0;
goto bail;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 2d6a191afec0..f7593b5e2b76 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -58,7 +58,7 @@ static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static __poll_t qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);
/*
@@ -568,20 +568,16 @@ done:
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
struct qib_pportdata *ppd = rcd->ppd;
- int i, any = 0, pidx = -1;
+ int i, pidx = -1;
+ bool any = false;
u16 lkey = key & 0x7FFF;
- int ret;
- if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+ if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
/* nothing to do; this key always valid */
- ret = 0;
- goto bail;
- }
+ return 0;
- if (!lkey) {
- ret = -EINVAL;
- goto bail;
- }
+ if (!lkey)
+ return -EINVAL;
/*
* Set the full membership bit, because it has to be
@@ -594,18 +590,14 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i] && pidx == -1)
pidx = i;
- if (rcd->pkeys[i] == key) {
- ret = -EEXIST;
- goto bail;
- }
+ if (rcd->pkeys[i] == key)
+ return -EEXIST;
}
- if (pidx == -1) {
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (pidx == -1)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i]) {
- any++;
+ any = true;
continue;
}
if (ppd->pkeys[i] == key) {
@@ -613,44 +605,34 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
if (atomic_inc_return(pkrefs) > 1) {
rcd->pkeys[pidx] = key;
- ret = 0;
- goto bail;
- } else {
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- any++;
+ return 0;
}
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ any = true;
}
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey)
/*
* It makes no sense to have both the limited and
* full membership PKEY set at the same time since
* the unlimited one will disable the limited one.
*/
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
+ return -EEXIST;
}
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!any)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i] &&
atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
rcd->pkeys[pidx] = key;
ppd->pkeys[i] = key;
(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
- ret = 0;
- goto bail;
+ return 0;
}
}
- ret = -EBUSY;
-
-bail:
- return ret;
+ return -EBUSY;
}
/**
@@ -1092,12 +1074,12 @@ bail:
return ret;
}
-static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1114,12 +1096,12 @@ static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1135,10 +1117,10 @@ static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
{
struct qib_ctxtdata *rcd;
- unsigned pollflag;
+ __poll_t pollflag;
rcd = ctxt_fp(fp);
if (!rcd)
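
The qib_poll*() conversions are type-only: __poll_t is an unsigned int carrying sparse's __bitwise annotation, so poll masks such as EPOLLIN cannot silently mix with plain integers when the code is run through sparse, while an ordinary compile sees a plain unsigned int. A minimal imitation of the annotation; the __CHECKER__ branch is only meaningful to sparse, and the names here merely follow the kernel convention:

	#include <stdio.h>

	#ifdef __CHECKER__
	#define __bitwise	__attribute__((bitwise))
	#define __force		__attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	typedef unsigned int __bitwise my_poll_t;

	#define MY_POLLIN	((__force my_poll_t)0x0001)
	#define MY_POLLRDNORM	((__force my_poll_t)0x0040)

	static my_poll_t sample_poll(int data_ready)
	{
		my_poll_t mask = 0;

		if (data_ready)
			mask |= MY_POLLIN | MY_POLLRDNORM;
		return mask;
	}

	int main(void)
	{
		printf("mask=0x%x\n", (__force unsigned int)sample_poll(1));
		return 0;
	}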
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 85dfbba427f6..3990f386aa32 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1119,6 +1119,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
+
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
deleted file mode 100644
index 8fdf79f8d4e4..000000000000
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/**
- * qib_alloc_lkey - allocate an lkey
- * @mr: memory region that this lkey protects
- * @dma_region: 0->normal key, 1->restricted DMA key
- *
- * Returns 0 if successful, otherwise returns -errno.
- *
- * Increments mr reference count as required.
- *
- * Sets the lkey field mr for non-dma regions.
- *
- */
-
-int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
-{
- unsigned long flags;
- u32 r;
- u32 n;
- int ret = 0;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
-
- /* special case for dma_mr lkey == 0 */
- if (dma_region) {
- struct rvt_mregion *tmr;
-
- tmr = rcu_access_pointer(dev->dma_mr);
- if (!tmr) {
- qib_get_mr(mr);
- rcu_assign_pointer(dev->dma_mr, mr);
- mr->lkey_published = 1;
- }
- goto success;
- }
-
- /* Find the next available LKEY */
- r = rkt->next;
- n = r;
- for (;;) {
- if (rkt->table[r] == NULL)
- break;
- r = (r + 1) & (rkt->max - 1);
- if (r == n)
- goto bail;
- }
- rkt->next = (r + 1) & (rkt->max - 1);
- /*
- * Make sure lkey is never zero which is reserved to indicate an
- * unrestricted LKEY.
- */
- rkt->gen++;
- /*
- * bits are capped in qib_verbs.c to insure enough bits
- * for generation number
- */
- mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
- ((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
- << 8);
- if (mr->lkey == 0) {
- mr->lkey |= 1 << 8;
- rkt->gen++;
- }
- qib_get_mr(mr);
- rcu_assign_pointer(rkt->table[r], mr);
- mr->lkey_published = 1;
-success:
- spin_unlock_irqrestore(&rkt->lock, flags);
-out:
- return ret;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = -ENOMEM;
- goto out;
-}
-
-/**
- * qib_free_lkey - free an lkey
- * @mr: mr to free from tables
- */
-void qib_free_lkey(struct rvt_mregion *mr)
-{
- unsigned long flags;
- u32 lkey = mr->lkey;
- u32 r;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (!mr->lkey_published)
- goto out;
- if (lkey == 0)
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- else {
- r = lkey >> (32 - ib_rvt_lkey_table_size);
- RCU_INIT_POINTER(rkt->table[r], NULL);
- }
- qib_put_mr(mr);
- mr->lkey_published = 0;
-out:
- spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * qib_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: qp for validation
- * @sge: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- *
- * increments the reference count upon success
- */
-int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc)
-{
- struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct rvt_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /* We use RKEY == zero for kernel virtual addresses */
- rcu_read_lock();
- if (rkey == 0) {
- struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- sge->mr = mr;
- sge->vaddr = (void *) vaddr;
- sge->length = len;
- sge->sge_length = len;
- sge->m = 0;
- sge->n = 0;
- goto ok;
- }
-
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
- goto bail;
-
- off = vaddr - mr->iova;
- if (unlikely(vaddr < mr->iova || off + len > mr->length ||
- (mr->access_flags & acc) == 0))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- * Page sizes are a uniform power of 2, so no loop is necessary;
- * entries_spanned_by_off is the number of times the loop below
- * would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / RVT_SEGSZ;
- n = entries_spanned_by_off % RVT_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- sge->mr = mr;
- sge->vaddr = mr->map[m]->segs[n].vaddr + off;
- sge->length = mr->map[m]->segs[n].length - off;
- sge->sge_length = len;
- sge->m = m;
- sge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
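
The allocator above packs the lkey table index into the top ib_rvt_lkey_table_size bits and a truncated generation counter starting at bit 8, with lkey 0 reserved for the unrestricted DMA key. A minimal stand-alone sketch of that packing, assuming an illustrative 16-bit table size:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for ib_rvt_lkey_table_size. */
#define LKEY_TABLE_BITS 16

static uint32_t make_lkey(uint32_t index, uint32_t gen)
{
	/* index in the top bits, truncated generation at bit 8 */
	uint32_t lkey = (index << (32 - LKEY_TABLE_BITS)) |
			((((1u << (24 - LKEY_TABLE_BITS)) - 1) & gen) << 8);

	if (lkey == 0)		/* lkey 0 is reserved for the DMA key */
		lkey |= 1 << 8;
	return lkey;
}

int main(void)
{
	printf("lkey = 0x%08x\n", make_lkey(5, 3));
	return 0;
}
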
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8f5754fb8579..cfddff45413f 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head))
goto bail;
/*
@@ -434,13 +432,13 @@ no_flow_control:
qp->s_state = OP(COMPARE_SWAP);
put_ib_ateth_swap(wqe->atomic_wr.swap,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
} else {
qp->s_state = OP(FETCH_ADD);
put_ib_ateth_swap(wqe->atomic_wr.compare_add,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
}
put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
&ohdr->u.atomic_eth);
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
goto ack_done;
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
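
The smp_read_barrier_depends() removals in these hunks track the kernel memory-model change that folded the read-dependency barrier into READ_ONCE(), so the annotated load alone orders the dependent accesses. A simplified sketch of the producer/consumer pattern the hunks rely on (locking elided):

/* producer (post_one_send): publish the new head index */
WRITE_ONCE(qp->s_head, next);

/* consumer: READ_ONCE() now supplies the dependency ordering that
 * an explicit smp_read_barrier_depends() used to provide. */
if (qp->s_last == READ_ONCE(qp->s_head))
	goto bail;	/* ring empty, nothing to flush */
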
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8..4662cc7bde92 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37ace44..70c58b88192c 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed193ce..386c3c4da0c7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index c55000501582..fabee760407e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1571,7 +1571,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
if (!ib_qib_sys_image_guid)
ib_qib_sys_image_guid = ppd->guid;
- strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
ibdev->owner = THIS_MODULE;
ibdev->node_guid = ppd->guid;
ibdev->phys_port_cnt = dd->num_pports;
@@ -1586,7 +1585,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 685ef2293cb8..4210ca14014d 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -45,7 +45,6 @@
#include "usnic_ib_verbs.h"
#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-#include "usnic_ib_sysfs.h"
static ssize_t usnic_ib_show_board(struct device *device,
struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index aa2456a4f9bd..a688a5669168 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -47,7 +47,6 @@
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
-#include "usnic_ib_verbs.h"
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 4f7bd3b6a315..44cb1cfba417 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -93,7 +93,7 @@ struct pvrdma_cq {
struct pvrdma_page_dir pdir;
u32 cq_handle;
bool is_kernel;
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion free;
};
@@ -196,7 +196,7 @@ struct pvrdma_qp {
u8 state;
bool is_kernel;
struct mutex mutex; /* QP state mutex. */
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion free;
};
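
The atomic_t to refcount_t conversions above add saturation checking on the object reference counts; the paired completion lets the destroy path wait out in-flight users. A minimal sketch of the pattern, with illustrative names:

#include <linux/refcount.h>
#include <linux/completion.h>

struct obj {
	refcount_t refcnt;
	struct completion free;
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->refcnt, 1);	/* creator holds one reference */
	init_completion(&o->free);
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))
		complete(&o->free);	/* last reference dropped */
}

static void obj_destroy(struct obj *o)
{
	obj_put(o);			/* drop the creator's reference */
	wait_for_completion(&o->free);	/* wait for in-flight users */
}
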
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index e529622cefad..faa9478c14a6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}
cq->ibcq.cqe = entries;
+ cq->is_kernel = !context;
- if (context) {
+ if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_cq;
@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
npages = ib_umem_page_count(cq->umem);
} else {
- cq->is_kernel = true;
-
/* One extra page for shared ring state */
npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
PAGE_SIZE - 1) / PAGE_SIZE;
@@ -178,7 +177,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
else
pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
- atomic_set(&cq->refcnt, 1);
+ refcount_set(&cq->refcnt, 1);
init_completion(&cq->free);
spin_lock_init(&cq->cq_lock);
@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
- if (context) {
+ if (!cq->is_kernel) {
cq->uar = &(to_vucontext(context)->uar);
/* Copy udata back. */
@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
err_page_dir:
pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
- if (context)
+ if (!cq->is_kernel)
ib_umem_release(cq->umem);
err_cq:
atomic_dec(&dev->num_cqs);
@@ -230,7 +229,7 @@ err_cq:
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index e92681878c93..d650a9fcde24 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -243,13 +243,13 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
mutex_init(&dev->port_mutex);
spin_lock_init(&dev->desc_lock);
- dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+ dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
GFP_KERNEL);
if (!dev->cq_tbl)
return ret;
spin_lock_init(&dev->cq_tbl_lock);
- dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+ dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
GFP_KERNEL);
if (!dev->qp_tbl)
goto err_cq_free;
@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
if (qp)
- atomic_inc(&qp->refcnt);
+ refcount_inc(&qp->refcnt);
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
if (qp && qp->ibqp.event_handler) {
@@ -346,7 +346,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
ibqp->event_handler(&e, ibqp->qp_context);
}
if (qp) {
- if (atomic_dec_and_test(&qp->refcnt))
+ if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
}
}
@@ -359,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.event_handler) {
@@ -372,7 +372,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
ibcq->event_handler(&e, ibcq->cq_context);
}
if (cq) {
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
}
@@ -531,13 +531,13 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq) {
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
@@ -882,8 +882,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "device version %d, driver version %d\n",
dev->dsr_version, PVRDMA_VERSION);
- dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
- &dev->dsrbase, GFP_KERNEL);
+ dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
if (!dev->dsr) {
dev_err(&pdev->dev, "failed to allocate shared region\n");
ret = -ENOMEM;
@@ -891,7 +891,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
}
/* Setup the shared region */
- memset(dev->dsr, 0, sizeof(*dev->dsr));
dev->dsr->driver_version = PVRDMA_VERSION;
dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
PVRDMA_GOS_BITS_32 :
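
The probe hunk above folds the explicit memset() into the allocation by switching to dma_zalloc_coherent(), which hands back already-zeroed coherent memory; condensed:

/* before: allocate, then zero the shared region by hand */
dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
			      &dev->dsrbase, GFP_KERNEL);
/* ... */
memset(dev->dsr, 0, sizeof(*dev->dsr));

/* after: a single call returns the region pre-zeroed */
dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
			       &dev->dsrbase, GFP_KERNEL);
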
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index 8519f3212e52..fa96fa4fb829 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -119,10 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
- int nchunks;
int ret;
- int entry;
- struct scatterlist *sg;
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -137,13 +134,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(umem);
}
- nchunks = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
- nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
-
- if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
- nchunks);
+ umem->npages);
ret = -EINVAL;
goto err_umem;
}
@@ -158,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mmr.size = length;
mr->umem = umem;
- ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
if (ret) {
dev_warn(&dev->pdev->dev,
"could not allocate page directory\n");
@@ -175,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
cmd->length = length;
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->access_flags = access_flags;
- cmd->nchunks = nchunks;
+ cmd->nchunks = umem->npages;
cmd->pdir_dma = mr->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
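
The deleted loop above counted DMA pages by walking the umem scatterlist; ib_umem_get() already records that total in umem->npages, which the new code reads directly. The removed computation, for reference:

int nchunks = 0;
struct scatterlist *sg;
int entry;

for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
	nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
/* same count that ib_umem_get() stores in umem->npages */
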
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 4059308e1454..7bf518bdbf21 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -245,12 +245,13 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
- atomic_set(&qp->refcnt, 1);
+ refcount_set(&qp->refcnt, 1);
init_completion(&qp->free);
qp->state = IB_QPS_RESET;
+ qp->is_kernel = !(pd->uobject && udata);
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
dev_dbg(&dev->pdev->dev,
"create queuepair from user space\n");
@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
- qp->is_kernel = true;
-
ret = pvrdma_set_sq_size(to_vdev(pd->device),
&init_attr->cap, qp);
if (ret)
@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
if (qp->rumem)
ib_umem_release(qp->rumem);
if (qp->sumem)
@@ -428,7 +427,7 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
- if (atomic_dec_and_test(&qp->refcnt))
+ if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 97d71e49c092..fb52b669bfce 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -56,7 +56,7 @@
* rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
- * @sig: true if @entry is solicited
+ * @solicited: true if @entry is solicited
*
* This may be called with qp->s_lock held.
*/
@@ -101,8 +101,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
wc->uqueue[head].opcode = entry->opcode;
wc->uqueue[head].vendor_err = entry->vendor_err;
wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data =
- (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
wc->uqueue[head].qp_num = entry->qp->qp_num;
wc->uqueue[head].src_qp = entry->src_qp;
wc->uqueue[head].wc_flags = entry->wc_flags;
@@ -198,7 +197,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
return ERR_PTR(-EINVAL);
/* Allocate the completion queue structure. */
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -214,7 +213,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
else
sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc) {
ret = ERR_PTR(-ENOMEM);
goto bail_cq;
@@ -369,7 +370,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
else
sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc)
return -ENOMEM;
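
The cq.c hunks keep vmalloc_user() only where the queue must be mmap()ed into user space, and otherwise draw the kernel-only queue from zeroed, node-local memory; the shape of the choice, condensed:

/* user CQs need zeroed, user-mappable memory; kernel-only CQs
 * can be placed on the device's NUMA node for locality. */
wc = udata ? vmalloc_user(sz)
	   : vzalloc_node(sz, rdi->dparms.node);
if (!wc)
	return ERR_PTR(-ENOMEM);
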
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index b3a38c5e4cad..dd11c6fcd060 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -272,7 +272,7 @@ bail:
/**
* rvt_attach_mcast - attach a qp to a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
@@ -335,7 +335,7 @@ bail_mcast:
/**
* rvt_detach_mcast - remove a qp from a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 42713511b53b..1b2e5362a3ff 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -768,7 +768,7 @@ bail:
/**
* rvt_map_phys_fmr - set up a fast memory region
- * @ibmfr: the fast memory region to set up
+ * @ibfmr: the fast memory region to set up
* @page_list: the list of pages to associate with the fast memory region
* @list_len: the number of pages to associate with the fast memory region
* @iova: the virtual address of the start of the fast memory region
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9177df60742a..c82e6bb3d77c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -269,7 +269,7 @@ no_qp_table:
/**
* free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
+ * @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
@@ -335,9 +335,9 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
/**
* alloc_qpn - Allocate the next available qpn or zero/one for QP type
* IB_QPT_SMI/IB_QPT_GSI
- *@rdi: rvt device info structure
- *@qpt: queue pair number table pointer
- *@port_num: IB port number, 1 based, comes from core
+ * @rdi: rvt device info structure
+ * @qpt: queue pair number table pointer
+ * @port_num: IB port number, 1 based, comes from core
*
* Return: The queue pair number
*/
@@ -1650,9 +1650,9 @@ static inline int rvt_qp_valid_operation(
/**
* rvt_qp_is_avail - determine queue capacity
- * @qp - the qp
- * @rdi - the rdmavt device
- * @reserved_op - is reserved operation
+ * @qp: the qp
+ * @rdi: the rdmavt device
+ * @reserved_op: is reserved operation
*
* This assumes the s_hlock is held but the s_last
* qp variable is uncontrolled.
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
/* non-reserved operations */
if (likely(qp->s_avail))
return 0;
- smp_read_barrier_depends(); /* see rc.c */
slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
@@ -2075,6 +2074,7 @@ void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
to = rvt_aeth_to_usec(aeth);
+ trace_rvt_rnrnak_add(qp, to);
hrtimer_start(&qp->s_rnr_timer,
ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
}
@@ -2104,17 +2104,14 @@ EXPORT_SYMBOL(rvt_stop_rc_timers);
* stop an rnr timer and return if the timer
* had been pending.
*/
-static int rvt_stop_rnr_timer(struct rvt_qp *qp)
+static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
- int rval = 0;
-
lockdep_assert_held(&qp->s_lock);
/* Remove QP from rnr timer */
if (qp->s_flags & RVT_S_WAIT_RNR) {
qp->s_flags &= ~RVT_S_WAIT_RNR;
- rval = hrtimer_try_to_cancel(&qp->s_rnr_timer);
+ trace_rvt_rnrnak_stop(qp, 0);
}
- return rval;
}
/**
@@ -2167,6 +2164,7 @@ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
spin_lock_irqsave(&qp->s_lock, flags);
rvt_stop_rnr_timer(qp);
+ trace_rvt_rnrnak_timeout(qp, 0);
rdi->driver_f.schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
return HRTIMER_NORESTART;
@@ -2175,8 +2173,8 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
* rvt_qp_iter_init - initial for QP iteration
- * @rdi - rvt devinfo
- * @v - u64 value
+ * @rdi: rvt devinfo
+ * @v: u64 value
*
* This returns an iterator suitable for iterating QPs
* in the system.
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index f7c48e9023de..3707952b4364 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -90,7 +90,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
return ERR_PTR(-EINVAL);
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
if (!srq)
return ERR_PTR(-ENOMEM);
@@ -101,7 +101,10 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
sizeof(struct rvt_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
+ srq->rq.wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
+ dev->dparms.node);
if (!srq->rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_srq;
@@ -129,16 +132,12 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
ret = ERR_PTR(err);
goto bail_ip;
}
- } else {
- srq->ip = NULL;
}
/*
* ib_create_srq() will initialize srq->ibsrq.
*/
spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
srq->limit = srq_init_attr->attr.srq_limit;
spin_lock(&dev->n_srqs_lock);
@@ -200,7 +199,10 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
sz = sizeof(struct rvt_rwqe) +
srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
+ wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
+ dev->dparms.node);
if (!wq)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index bb4b1e710f22..36ddbd291ee0 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -45,8 +45,8 @@
*
*/
-#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
-#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
+#define RDI_DEV_ENTRY(rdi) __string(dev, rvt_get_ibdev_name(rdi))
+#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rvt_get_ibdev_name(rdi))
#include "trace_rvt.h"
#include "trace_qp.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
index 4c77a3119bda..efc9d814b032 100644
--- a/drivers/infiniband/sw/rdmavt/trace_qp.h
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -85,6 +85,48 @@ DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
TP_PROTO(struct rvt_qp *qp, u32 bucket),
TP_ARGS(qp, bucket));
+DECLARE_EVENT_CLASS(
+ rvt_rnrnak_template,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(void *, hrtimer)
+ __field(u32, s_flags)
+ __field(u32, to)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->hrtimer = &qp->s_rnr_timer;
+ __entry->s_flags = qp->s_flags;
+ __entry->to = to;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x hrtimer 0x%p s_flags 0x%x timeout %u us",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->hrtimer,
+ __entry->s_flags,
+ __entry->to
+ )
+);
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_add,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_stop,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
#endif /* __RVT_TRACE_QP_H */
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 64bdd442078a..a4553b2b3696 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -224,7 +224,8 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
* rvt_query_pkey - Return a pkey from the table at a given index
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @intex: Index into pkey table
+ * @index: Index into pkey table
+ * @pkey: returned pkey from the port pkey table
*
 * Return: 0 on failure, pkey otherwise
*/
@@ -255,7 +256,7 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
* rvt_query_gid - Return a gid from the table
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @index: = Index in table
+ * @guid_index: Index in table
* @gid: Gid to return
*
* Return: 0 on success
@@ -297,8 +298,8 @@ static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
/**
* rvt_alloc_ucontext - Allocate a user context
- * @ibdev: Vers IB dev
- * @data: User data allocated
+ * @ibdev: Verbs IB dev
+ * @udata: User data allocated
*/
static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
@@ -413,7 +414,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
* required for rdmavt to function.
*/
if ((!rdi->driver_f.port_callback) ||
- (!rdi->driver_f.get_card_name) ||
(!rdi->driver_f.get_pci_dev))
return -EINVAL;
break;
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index f363505312be..8823b2e7aac6 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -63,19 +63,19 @@
#define rvt_pr_info(rdi, fmt, ...) \
__rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_warn(rdi, fmt, ...) \
__rvt_pr_warn(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_err(rdi, fmt, ...) \
__rvt_pr_err(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 320bffc980d8..bad4a576d7cf 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,8 +1,8 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
- depends on NET_UDP_TUNNEL
- depends on CRYPTO_CRC32
+ select NET_UDP_TUNNEL
+ select CRYPTO_CRC32
select DMA_VIRT_OPS
---help---
This driver implements the InfiniBand RDMA transport over
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8c3d30b3092d..b7debb6f2eac 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,6 @@ void rxe_release(struct kref *kref)
ib_dealloc_device(&rxe->ib_dev);
}
-void rxe_dev_put(struct rxe_dev *rxe)
-{
- kref_put(&rxe->ref_cnt, rxe_release);
-}
-EXPORT_SYMBOL_GPL(rxe_dev_put);
-
/* initialize rxe device parameters */
static int rxe_init_device_param(struct rxe_dev *rxe)
{
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 6447d736d5a4..7d232611303f 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -57,6 +57,7 @@
#include "rxe_hdr.h"
#include "rxe_param.h"
#include "rxe_verbs.h"
+#include "rxe_loc.h"
#define RXE_UVERBS_ABI_VERSION (1)
@@ -95,7 +96,10 @@ void rxe_remove_all(void);
int rxe_rcv(struct sk_buff *skb);
-void rxe_dev_put(struct rxe_dev *rxe);
+static inline void rxe_dev_put(struct rxe_dev *rxe)
+{
+ kref_put(&rxe->ref_cnt, rxe_release);
+}
struct rxe_dev *net_to_rxe(struct net_device *ndev);
struct rxe_dev *get_rxe_by_name(const char *name);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index d7472a442a2c..96c3a6c5c4b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
void rxe_release(struct kref *kref);
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 59dee10bebcb..159246b03867 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -82,7 +82,7 @@ struct rxe_dev *get_rxe_by_name(const char *name)
}
-struct rxe_recv_sockets recv_sockets;
+static struct rxe_recv_sockets recv_sockets;
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
@@ -452,31 +452,26 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
- struct sk_buff *nskb;
struct rxe_av *av;
int err;
av = rxe_get_av(pkt);
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- return -ENOMEM;
-
- nskb->destructor = rxe_skb_tx_dtor;
- nskb->sk = pkt->qp->sk->sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ skb->sk = pkt->qp->sk->sk;
rxe_add_ref(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (av->network_type == RDMA_NETWORK_IPV4) {
- err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else if (av->network_type == RDMA_NETWORK_IPV6) {
- err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else {
pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
atomic_dec(&pkt->qp->skb_out);
rxe_drop_ref(pkt->qp);
- kfree_skb(nskb);
+ kfree_skb(skb);
return -EINVAL;
}
@@ -485,7 +480,6 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EAGAIN;
}
- kfree_skb(skb);
return 0;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 1c06b3bfe1b6..728d8c71b36a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -43,7 +43,6 @@ struct rxe_recv_sockets {
struct socket *sk6;
};
-extern struct rxe_recv_sockets recv_sockets;
extern struct notifier_block rxe_net_notifier;
void rxe_release_udp_tunnel(struct socket *sk);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 4469592b839d..137d6c0c49d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+static void rxe_qp_do_cleanup(struct work_struct *work)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
rxe_drop_all_mcast_groups(qp);
@@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg)
kernel_sock_shutdown(qp->sk, SHUT_RDWR);
sock_release(qp->sk);
}
+
+/* called when the last reference to the qp is dropped */
+void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+{
+ struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+
+ execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
+}
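
rxe_qp_cleanup() runs as a kref release and can therefore be reached from atomic context, so the sleeping teardown (socket shutdown and release) is bounced through execute_in_process_context(). A minimal sketch of that helper's contract, with illustrative names:

#include <linux/workqueue.h>

struct obj {
	struct execute_work cleanup_work;
};

static void obj_do_cleanup(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj,
				     cleanup_work.work);

	(void)o;	/* placeholder for sleeping teardown, e.g. sock_release() */
}

static void obj_cleanup(struct obj *o)
{
	/* runs obj_do_cleanup() inline when the caller can sleep,
	 * otherwise queues it on the system workqueue */
	execute_in_process_context(obj_do_cleanup, &o->cleanup_work);
}
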
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index fb8c83e055e1..4c3f899241d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -336,7 +336,6 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
union ib_gid dgid;
union ib_gid *pdgid;
- u16 index;
if (skb->protocol == htons(ETH_P_IP)) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
@@ -348,7 +347,7 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
IB_GID_TYPE_ROCE_UDP_ENCAP,
- 1, rxe->ndev, &index);
+ 1, rxe->ndev, NULL);
}
/* rxe_rcv is called from the interface driver */
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 26a7f923045b..7bdaf71b8221 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -594,15 +594,8 @@ int rxe_requester(void *arg)
rxe_add_ref(qp);
next_wqe:
- if (unlikely(!qp->valid)) {
- rxe_drain_req_pkts(qp, true);
+ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
goto exit;
- }
-
- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
- rxe_drain_req_pkts(qp, true);
- goto exit;
- }
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = consumer_index(qp->sq.queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 4240866a5331..d37bb9b97569 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -863,8 +863,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (pkt->mask & RXE_IMMDT_MASK) {
uwc->wc_flags |= IB_WC_WITH_IMM;
- uwc->ex.imm_data =
- (__u32 __force)immdt_imm(pkt);
+ uwc->ex.imm_data = immdt_imm(pkt);
}
if (pkt->mask & RXE_IETH_MASK) {
@@ -1210,7 +1209,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
}
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
struct sk_buff *skb;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index d03002b9d84d..7210a784abb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -814,6 +814,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
(queue_count(qp->sq.queue) > 1);
rxe_run_task(&qp->req.task, must_sched);
+ if (unlikely(qp->req.state == QP_STATE_ERROR))
+ rxe_run_task(&qp->comp.task, 1);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 0c2dbe45c729..1019f5e7dbdd 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,6 +35,7 @@
#define RXE_VERBS_H
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
@@ -281,6 +282,8 @@ struct rxe_qp {
struct timer_list rnr_nak_timer;
spinlock_t state_lock; /* guard requester and completer */
+
+ struct execute_work cleanup_work;
};
enum rxe_mem_state {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 71ea9e26666c..962fbcb57dc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -766,12 +766,14 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
skb_orphan(skb);
skb_dst_drop(skb);
- if (netif_queue_stopped(dev))
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS)) {
+ if (netif_queue_stopped(dev)) {
+ rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS);
+ if (unlikely(rc < 0))
ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ else if (rc)
napi_schedule(&priv->send_napi);
- }
+ }
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
@@ -876,7 +878,7 @@ int ipoib_cm_dev_open(struct net_device *dev)
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
- printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
+ pr_warn("%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
goto err_cm;
}
@@ -884,8 +886,8 @@ int ipoib_cm_dev_open(struct net_device *dev)
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
0);
if (ret) {
- printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
- IPOIB_CM_IETF_ID | priv->qp->qp_num);
+ pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
+ IPOIB_CM_IETF_ID | priv->qp->qp_num);
goto err_listen;
}
@@ -1562,7 +1564,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -ENOSYS)
- printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
+ pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e6151a29c412..10384ea50bed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -644,7 +644,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (netif_queue_stopped(dev))
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS))
+ IB_CQ_REPORT_MISSED_EVENTS) < 0)
ipoib_warn(priv, "request notify on send CQ failed\n");
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
@@ -1085,8 +1085,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
netif_addr_unlock_bh(priv->dev);
- err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
- priv->dev, &port, &index);
+ err = ib_find_gid(priv->ca, &search_gid, priv->dev, &port, &index);
netif_addr_lock_bh(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8880351df179..5930c7d9a8fb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -768,13 +768,30 @@ static void path_rec_completion(int status,
if (!status) {
struct rdma_ah_attr av;
- if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
+ if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
+ pathrec, &av))
ah = ipoib_create_ah(dev, priv->pd, &av);
}
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key derived from the
+ * LLADDR; it must remain unchanged even if the SA returns a
+ * different GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -840,6 +857,23 @@ static void path_rec_completion(int status,
}
}
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
+ void *gid)
+{
+ path->dev = priv->dev;
+
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
+
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
+ path->pathrec.sgid = priv->local_gid;
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
+ path->pathrec.numb_path = 1;
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+}
+
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -852,21 +886,11 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
if (!path)
return NULL;
- path->dev = dev;
-
skb_queue_head_init(&path->queue);
INIT_LIST_HEAD(&path->neigh_list);
- if (rdma_cap_opa_ah(priv->ca, priv->port))
- path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
- else
- path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
- memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
- path->pathrec.sgid = priv->local_gid;
- path->pathrec.pkey = cpu_to_be16(priv->pkey);
- path->pathrec.numb_path = 1;
- path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ init_path_rec(priv, path, gid);
return path;
}
@@ -1005,6 +1029,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
+ /* no broadcast means all paths are, or will become, invalid */
+ if (!priv->broadcast)
+ goto drop_and_unlock;
+
path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
@@ -1014,6 +1042,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
new_path = 1;
}
if (path) {
+ if (!new_path)
+ /* make sure there are no changes in the existing path record */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
@@ -1030,8 +1062,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
} else
__path_add(dev, path);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1051,11 +1082,16 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+
+drop_and_unlock:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1674,8 +1710,8 @@ static int ipoib_dev_init_default(struct net_device *dev)
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
if (!priv->tx_ring) {
- printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
- priv->ca->name, ipoib_sendq_size);
+ pr_warn("%s: failed to allocate TX ring (%d entries)\n",
+ priv->ca->name, ipoib_sendq_size);
goto out_rx_ring_cleanup;
}
@@ -2207,16 +2243,17 @@ static struct net_device *ipoib_add_port(const char *format,
int result = -ENOMEM;
priv = ipoib_intf_alloc(hca, port, format);
- if (!priv)
+ if (!priv) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
goto alloc_mem_failed;
+ }
SET_NETDEV_DEV(priv->dev, hca->dev.parent);
priv->dev->dev_id = port - 1;
result = ib_query_port(hca, port, &attr);
if (result) {
- printk(KERN_WARNING "%s: ib_query_port %d failed\n",
- hca->name, port);
+ pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
goto device_init_failed;
}
@@ -2231,8 +2268,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
- printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2249,8 +2286,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
if (result) {
- printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2260,8 +2297,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ipoib_dev_init(priv->dev, hca, port);
if (result) {
- printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: failed to initialize port %d (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2271,8 +2308,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = register_netdev(priv->dev);
if (result) {
- printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
- hca->name, port, result);
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+ hca->name, port, result);
goto register_failed;
}
@@ -2337,8 +2374,7 @@ static void ipoib_add_one(struct ib_device *device)
}
if (!count) {
- pr_err("Failed to init port, removing it\n");
- ipoib_remove_one(device, dev_list);
+ kfree(dev_list);
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index a1ed25422b72..984a88096f39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -178,7 +178,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
- printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
+ pr_warn("%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
@@ -187,7 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
- printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
+ pr_warn("%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
}
@@ -208,7 +208,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
- printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
+ pr_warn("%s: failed to create QP\n", ca->name);
goto out_free_send_cq;
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2a07692007bd..df49c4eb67f7 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -142,8 +142,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
}
- iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
- "VA:%#llX + unsol:%d\n",
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
task->itt, mem_reg->rkey,
(unsigned long long)mem_reg->sge.addr, unsol_sz);
}
@@ -436,7 +435,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = NULL;
+ struct iser_tx_desc *tx_desc;
struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
@@ -452,10 +451,8 @@ int iser_send_data_out(struct iscsi_conn *conn,
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
- if (tx_desc == NULL) {
- iser_err("Failed to alloc desc for post dataout\n");
+ if (!tx_desc)
return -ENOMEM;
- }
tx_desc->type = ISCSI_TX_DATAOUT;
tx_desc->cqe.done = iser_dataout_comp;
@@ -475,8 +472,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Offset:%ld & DSL:%ld in Data-Out "
- "inconsistent with total len:%ld, itt:%d\n",
+ iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
@@ -614,8 +610,8 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
iser_conn, rkey);
if (unlikely(!iser_conn->snd_w_inv)) {
- iser_err("conn %p: unexpected remote invalidation, "
- "terminating connection\n", iser_conn);
+ iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
+ iser_conn);
return -EPROTO;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 1b02283ce20e..fff40b097947 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2124,6 +2124,9 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
u32 rkey, offset;
int ret;
+ if (cmd->ctx_init_done)
+ goto rdma_ctx_post;
+
if (dir == DMA_FROM_DEVICE) {
addr = cmd->write_va;
rkey = cmd->write_stag;
@@ -2151,11 +2154,15 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
se_cmd->t_data_sg, se_cmd->t_data_nents,
offset, addr, rkey, dir);
}
+
if (ret < 0) {
isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
return ret;
}
+ cmd->ctx_init_done = true;
+
+rdma_ctx_post:
ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
if (ret < 0)
isert_err("Cmd: %p failed to post RDMA res\n", cmd);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index d6fd248320ae..3b296bac4f60 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -126,6 +126,7 @@ struct isert_cmd {
struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
+ bool ctx_init_done;
};
static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 4b615c1451e7..15711dcc6f58 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -710,7 +710,7 @@ vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
/**
* opa_vnic_vema_send_trap -- This function sends a trap to the EM
- * @cport: pointer to vnic control port
+ * @adapter: pointer to vnic adapter
* @data: pointer to trap data filled by calling function
 * @lid: issuer's lid (encap_slid from vesw_port_info)
*
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 972d4b3c5223..b48843833d69 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -41,6 +41,7 @@
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
+#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <linux/atomic.h>
@@ -144,7 +145,9 @@ static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
@@ -265,8 +268,8 @@ static void srp_qp_event(struct ib_event *event, void *context)
ib_event_msg(event->event), event->event);
}
-static int srp_init_qp(struct srp_target_port *target,
- struct ib_qp *qp)
+static int srp_init_ib_qp(struct srp_target_port *target,
+ struct ib_qp *qp)
{
struct ib_qp_attr *attr;
int ret;
@@ -277,7 +280,7 @@ static int srp_init_qp(struct srp_target_port *target,
ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
- be16_to_cpu(target->pkey),
+ be16_to_cpu(target->ib_cm.pkey),
&attr->pkey_index);
if (ret)
goto out;
@@ -298,32 +301,110 @@ out:
return ret;
}
-static int srp_new_cm_id(struct srp_rdma_ch *ch)
+static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
- srp_cm_handler, ch);
+ srp_ib_cm_handler, ch);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
- if (ch->cm_id)
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = new_cm_id;
+ if (ch->ib_cm.cm_id)
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = new_cm_id;
if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
target->srp_host->port))
- ch->path.rec_type = SA_PATH_REC_TYPE_OPA;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
else
- ch->path.rec_type = SA_PATH_REC_TYPE_IB;
- ch->path.sgid = target->sgid;
- ch->path.dgid = target->orig_dgid;
- ch->path.pkey = target->pkey;
- ch->path.service_id = target->service_id;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
+ ch->ib_cm.path.sgid = target->sgid;
+ ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
+ ch->ib_cm.path.pkey = target->ib_cm.pkey;
+ ch->ib_cm.path.service_id = target->ib_cm.service_id;
return 0;
}
+static const char *inet_ntop(const void *sa, char *dst, unsigned int size)
+{
+ switch (((struct sockaddr *)sa)->sa_family) {
+ case AF_INET:
+ snprintf(dst, size, "%pI4",
+ &((struct sockaddr_in *)sa)->sin_addr);
+ break;
+ case AF_INET6:
+ snprintf(dst, size, "%pI6",
+ &((struct sockaddr_in6 *)sa)->sin6_addr);
+ break;
+ default:
+ snprintf(dst, size, "???");
+ break;
+ }
+ return dst;
+}
+
+static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct rdma_cm_id *new_cm_id;
+ char src_addr[64], dst_addr[64];
+ int ret;
+
+ new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(new_cm_id)) {
+ ret = PTR_ERR(new_cm_id);
+ new_cm_id = NULL;
+ goto out;
+ }
+
+ init_completion(&ch->done);
+ ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
+ (struct sockaddr *)&target->rdma_cm.src : NULL,
+ (struct sockaddr *)&target->rdma_cm.dst,
+ SRP_PATH_REC_TIMEOUT_MS);
+ if (ret) {
+ pr_err("No route available from %s to %s (%d)\n",
+ target->rdma_cm.src_specified ?
+ inet_ntop(&target->rdma_cm.src, src_addr,
+ sizeof(src_addr)) : "(any)",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+ goto out;
+
+ ret = ch->status;
+ if (ret) {
+ pr_err("Resolving address %s failed (%d)\n",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+
+ swap(ch->rdma_cm.cm_id, new_cm_id);
+
+out:
+ if (new_cm_id)
+ rdma_destroy_id(new_cm_id);
+
+ return ret;
+}
+
+static int srp_new_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
+ srp_new_ib_cm_id(ch);
+}
+
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
@@ -521,16 +602,25 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
- qp = ib_create_qp(dev->pd, init_attr);
- if (IS_ERR(qp)) {
- ret = PTR_ERR(qp);
+ if (target->using_rdma_cm) {
+ ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
+ qp = ch->rdma_cm.cm_id->qp;
+ } else {
+ qp = ib_create_qp(dev->pd, init_attr);
+ if (!IS_ERR(qp)) {
+ ret = srp_init_ib_qp(target, qp);
+ if (ret)
+ ib_destroy_qp(qp);
+ } else {
+ ret = PTR_ERR(qp);
+ }
+ }
+ if (ret) {
+ pr_err("QP creation failed for dev %s: %d\n",
+ dev_name(&dev->dev->dev), ret);
goto err_send_cq;
}
- ret = srp_init_qp(target, qp);
- if (ret)
- goto err_qp;
-
if (dev->use_fast_reg) {
fr_pool = srp_alloc_fr_pool(target);
if (IS_ERR(fr_pool)) {
@@ -574,7 +664,10 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
return 0;
err_qp:
- ib_destroy_qp(qp);
+ if (target->using_rdma_cm)
+ rdma_destroy_qp(ch->rdma_cm.cm_id);
+ else
+ ib_destroy_qp(qp);
err_send_cq:
ib_free_cq(send_cq);
@@ -600,9 +693,16 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (!ch->target)
return;
- if (ch->cm_id) {
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = NULL;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id) {
+ rdma_destroy_id(ch->rdma_cm.cm_id);
+ ch->rdma_cm.cm_id = NULL;
+ }
+ } else {
+ if (ch->ib_cm.cm_id) {
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = NULL;
+ }
}
	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() failed, return. */
@@ -658,16 +758,16 @@ static void srp_path_rec_completion(int status,
shost_printk(KERN_ERR, target->scsi_host,
PFX "Got failed path rec status %d\n", status);
else
- ch->path = *pathrec;
+ ch->ib_cm.path = *pathrec;
complete(&ch->done);
}
-static int srp_lookup_path(struct srp_rdma_ch *ch)
+static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int ret = -ENODEV;
- ch->path.numb_path = 1;
+ ch->ib_cm.path.numb_path = 1;
init_completion(&ch->done);
@@ -678,10 +778,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
if (!scsi_host_get(target->scsi_host))
goto out;
- ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
- &ch->path,
+ &ch->ib_cm.path,
IB_SA_PATH_REC_SERVICE_ID |
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
@@ -690,8 +790,8 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
SRP_PATH_REC_TIMEOUT_MS,
GFP_KERNEL,
srp_path_rec_completion,
- ch, &ch->path_query);
- ret = ch->path_query_id;
+ ch, &ch->ib_cm.path_query);
+ ret = ch->ib_cm.path_query_id;
if (ret < 0)
goto put;
@@ -702,7 +802,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
- PFX "Path record query failed\n");
+ PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
+ ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id));
put:
scsi_host_put(target->scsi_host);
@@ -711,6 +814,34 @@ out:
return ret;
}
+static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ int ret;
+
+ init_completion(&ch->done);
+
+ ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
+ if (ret)
+ return ret;
+
+	ret = wait_for_completion_interruptible(&ch->done);
+	if (ret < 0)
+		return ret;
+
+ if (ch->status != 0)
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Path resolution failed\n");
+
+ return ch->status;
+}
+
+static int srp_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
+ srp_ib_lookup_path(ch);
+}
+
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
struct ib_port_attr attr;
@@ -732,48 +863,76 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
struct srp_target_port *target = ch->target;
struct {
- struct ib_cm_req_param param;
- struct srp_login_req priv;
+ struct rdma_conn_param rdma_param;
+ struct srp_login_req_rdma rdma_req;
+ struct ib_cm_req_param ib_param;
+ struct srp_login_req ib_req;
} *req = NULL;
+ char *ipi, *tpi;
int status;
- u8 subnet_timeout;
-
- subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->param.primary_path = &ch->path;
- req->param.alternate_path = NULL;
- req->param.service_id = target->service_id;
- req->param.qp_num = ch->qp->qp_num;
- req->param.qp_type = ch->qp->qp_type;
- req->param.private_data = &req->priv;
- req->param.private_data_len = sizeof req->priv;
- req->param.flow_control = 1;
-
- get_random_bytes(&req->param.starting_psn, 4);
- req->param.starting_psn &= 0xffffff;
+ req->ib_param.flow_control = 1;
+ req->ib_param.retry_count = target->tl_retry_count;
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
- req->param.responder_resources = 4;
- req->param.remote_cm_response_timeout = subnet_timeout + 2;
- req->param.local_cm_response_timeout = subnet_timeout + 2;
- req->param.retry_count = target->tl_retry_count;
- req->param.rnr_retry_count = 7;
- req->param.max_cm_retries = 15;
-
- req->priv.opcode = SRP_LOGIN_REQ;
- req->priv.tag = 0;
- req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
- req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ req->ib_param.responder_resources = 4;
+ req->ib_param.rnr_retry_count = 7;
+ req->ib_param.max_cm_retries = 15;
+
+ req->ib_req.opcode = SRP_LOGIN_REQ;
+ req->ib_req.tag = 0;
+ req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+ req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
- req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
- SRP_MULTICHAN_SINGLE);
+ req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
+ SRP_MULTICHAN_SINGLE);
+
+ if (target->using_rdma_cm) {
+ req->rdma_param.flow_control = req->ib_param.flow_control;
+ req->rdma_param.responder_resources =
+ req->ib_param.responder_resources;
+ req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
+ req->rdma_param.retry_count = req->ib_param.retry_count;
+ req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
+ req->rdma_param.private_data = &req->rdma_req;
+ req->rdma_param.private_data_len = sizeof(req->rdma_req);
+
+ req->rdma_req.opcode = req->ib_req.opcode;
+ req->rdma_req.tag = req->ib_req.tag;
+ req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
+ req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
+ req->rdma_req.req_flags = req->ib_req.req_flags;
+
+ ipi = req->rdma_req.initiator_port_id;
+ tpi = req->rdma_req.target_port_id;
+ } else {
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
+
+ req->ib_param.primary_path = &ch->ib_cm.path;
+ req->ib_param.alternate_path = NULL;
+ req->ib_param.service_id = target->ib_cm.service_id;
+ get_random_bytes(&req->ib_param.starting_psn, 4);
+ req->ib_param.starting_psn &= 0xffffff;
+ req->ib_param.qp_num = ch->qp->qp_num;
+ req->ib_param.qp_type = ch->qp->qp_type;
+ req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.private_data = &req->ib_req;
+ req->ib_param.private_data_len = sizeof(req->ib_req);
+
+ ipi = req->ib_req.initiator_port_id;
+ tpi = req->ib_req.target_port_id;
+ }
+
/*
* In the published SRP specification (draft rev. 16a), the
* port identifier format is 8 bytes of ID extension followed
@@ -784,19 +943,15 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
* recognized by the I/O Class they report.
*/
if (target->io_class == SRP_REV10_IB_IO_CLASS) {
- memcpy(req->priv.initiator_port_id,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->initiator_ext, 8);
- memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
- memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
+ memcpy(ipi, &target->sgid.global.interface_id, 8);
+ memcpy(ipi + 8, &target->initiator_ext, 8);
+ memcpy(tpi, &target->ioc_guid, 8);
+ memcpy(tpi + 8, &target->id_ext, 8);
} else {
- memcpy(req->priv.initiator_port_id,
- &target->initiator_ext, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.target_port_id, &target->id_ext, 8);
- memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
+ memcpy(ipi, &target->initiator_ext, 8);
+ memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
+ memcpy(tpi, &target->id_ext, 8);
+ memcpy(tpi + 8, &target->ioc_guid, 8);
}
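For readability, the SRP_REV10_IB_IO_CLASS branch above is equivalent to this hypothetical helper (the name and factoring are assumed, not part of the patch); rev-10 targets expect the GUID before the ID extension, the reverse of the rev-16a layout:

static void srp_sketch_rev10_port_ids(struct srp_target_port *target,
				      char *ipi, char *tpi)
{
	/* Rev 10: GUID first, then the ID extension. */
	memcpy(ipi, &target->sgid.global.interface_id, 8);
	memcpy(ipi + 8, &target->initiator_ext, 8);
	memcpy(tpi, &target->ioc_guid, 8);
	memcpy(tpi + 8, &target->id_ext, 8);
}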
/*
@@ -809,12 +964,14 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
PFX "Topspin/Cisco initiator port ID workaround "
"activated for target GUID %016llx\n",
be64_to_cpu(target->ioc_guid));
- memset(req->priv.initiator_port_id, 0, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->srp_host->srp_dev->dev->node_guid, 8);
+ memset(ipi, 0, 8);
+ memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
}
- status = ib_send_cm_req(ch->cm_id, &req->param);
+ if (target->using_rdma_cm)
+ status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
+ else
+ status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
kfree(req);
@@ -841,14 +998,23 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
static void srp_disconnect_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
- int i;
+ int i, ret;
/* XXX should send SRP_I_LOGOUT request */
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ch->connected = false;
- if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
+ ret = 0;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id)
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ } else {
+ if (ch->ib_cm.cm_id)
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
+ NULL, 0);
+ }
+ if (ret < 0) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM DREQ failed\n");
}
@@ -962,6 +1128,7 @@ static void srp_remove_target(struct srp_target_port *target)
scsi_remove_host(target->scsi_host);
srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
@@ -2349,7 +2516,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_target_port *target = ch->target;
struct ib_qp_attr *qp_attr = NULL;
int attr_mask = 0;
- int ret;
+ int ret = 0;
int i;
if (lrsp->opcode == SRP_LOGIN_RSP) {
@@ -2379,40 +2546,42 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
- ret = -ENOMEM;
- qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
- if (!qp_attr)
- goto error;
-
- qp_attr->qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
-
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
-
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = ch->rx_ring[i];
ret = srp_post_recv(ch, iu);
if (ret)
- goto error_free;
+ goto error;
}
- qp_attr->qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
+ if (!target->using_rdma_cm) {
+ ret = -ENOMEM;
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
+ if (!qp_attr)
+ goto error;
- target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+ qp_attr->qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ qp_attr->qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
+
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
+
+ ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ }
error_free:
kfree(qp_attr);
@@ -2421,41 +2590,43 @@ error:
ch->status = ret;
}
-static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event,
- struct srp_rdma_ch *ch)
+static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event,
+ struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct Scsi_Host *shost = target->scsi_host;
struct ib_class_port_info *cpi;
int opcode;
+ u16 dlid;
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
- sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
- ch->path.pkey = cpi->redirect_pkey;
+ dlid = be16_to_cpu(cpi->redirect_lid);
+ sa_path_set_dlid(&ch->ib_cm.path, dlid);
+ ch->ib_cm.path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
- memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
+ memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
- ch->status = sa_path_get_dlid(&ch->path) ?
- SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
+ ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
break;
case IB_CM_REJ_PORT_REDIRECT:
if (srp_target_is_topspin(target)) {
+ union ib_gid *dgid = &ch->ib_cm.path.dgid;
+
/*
* Topspin/Cisco SRP gateways incorrectly send
* reject reason code 25 when they mean 24
* (port redirect).
*/
- memcpy(ch->path.dgid.raw,
- event->param.rej_rcvd.ari, 16);
+ memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
shost_printk(KERN_DEBUG, shost,
PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
- be64_to_cpu(ch->path.dgid.global.subnet_prefix),
- be64_to_cpu(ch->path.dgid.global.interface_id));
+ be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
ch->status = SRP_PORT_REDIRECT;
} else {
@@ -2484,7 +2655,8 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
shost_printk(KERN_WARNING, shost, PFX
"SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
target->sgid.raw,
- target->orig_dgid.raw, reason);
+ target->ib_cm.orig_dgid.raw,
+ reason);
} else
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
@@ -2504,7 +2676,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
}
}
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
@@ -2527,7 +2699,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
comp = 1;
- srp_cm_rej_handler(cm_id, event, ch);
+ srp_ib_cm_rej_handler(cm_id, event, ch);
break;
case IB_CM_DREQ_RECEIVED:
@@ -2565,6 +2737,135 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
+static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
+ struct rdma_cm_event *event)
+{
+ struct srp_target_port *target = ch->target;
+ struct Scsi_Host *shost = target->scsi_host;
+ int opcode;
+
+ switch (event->status) {
+ case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ opcode = *(u8 *) event->param.conn.private_data;
+ if (opcode == SRP_LOGIN_REJ) {
+ struct srp_login_rej *rej =
+ (struct srp_login_rej *)
+ event->param.conn.private_data;
+ u32 reason = be32_to_cpu(rej->reason);
+
+ if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ else
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ } else {
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
+ opcode);
+ }
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: stale connection\n");
+ ch->status = SRP_STALE_CONN;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
+ event->status);
+ ch->status = -ECONNRESET;
+ break;
+ }
+}
+
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct srp_rdma_ch *ch = cm_id->context;
+ struct srp_target_port *target = ch->target;
+ int comp = 0;
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ ch->status = -ENXIO;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ ch->status = -EHOSTUNREACH;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM REQ failed\n");
+ comp = 1;
+ ch->status = -ECONNRESET;
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ comp = 1;
+ srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
+ break;
+
+ case RDMA_CM_EVENT_REJECTED:
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
+ comp = 1;
+
+ srp_rdma_cm_rej_handler(ch, event);
+ break;
+
+ case RDMA_CM_EVENT_DISCONNECTED:
+ if (ch->connected) {
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "received DREQ\n");
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ comp = 1;
+ ch->status = 0;
+ queue_work(system_long_wq, &target->tl_err_work);
+ }
+ break;
+
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "connection closed\n");
+
+ comp = 1;
+ ch->status = 0;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled CM event %d\n", event->event);
+ break;
+ }
+
+ if (comp)
+ complete(&ch->done);
+
+ return 0;
+}
+
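As a reading aid, the per-event channel status set by the handler above condenses to the following hypothetical table function (derived from the switch cases and approximate: a stale-connection REJ yields SRP_STALE_CONN instead, and unhandled events leave ch->status untouched):

static int srp_sketch_rdma_cm_status(enum rdma_cm_event_type ev)
{
	switch (ev) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		return -ENXIO;
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		return -EHOSTUNREACH;
	default:
		return -ECONNRESET;	/* CONNECT_ERROR and most REJ reasons */
	}
}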
/**
* srp_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
@@ -2717,6 +3018,16 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
+static int srp_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct srp_target_port *target = host_to_target(shost);
+
+ if (target->target_can_queue)
+ starget->can_queue = target->target_can_queue;
+ return 0;
+}
+
static int srp_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -2766,7 +3077,10 @@ static ssize_t show_service_id(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%016llx\n",
+ be64_to_cpu(target->ib_cm.service_id));
}
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@@ -2774,7 +3088,9 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@@ -2791,7 +3107,9 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch = &target->ch[0];
- return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
static ssize_t show_orig_dgid(struct device *dev,
@@ -2799,7 +3117,9 @@ static ssize_t show_orig_dgid(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
static ssize_t show_req_lim(struct device *dev,
@@ -2921,6 +3241,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .target_alloc = srp_target_alloc,
.slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
@@ -3044,6 +3365,9 @@ static bool srp_conn_unique(struct srp_host *host,
if (t != target &&
target->id_ext == t->id_ext &&
target->ioc_guid == t->ioc_guid &&
+ (!target->using_rdma_cm ||
+ memcmp(&target->rdma_cm.dst, &t->rdma_cm.dst,
+ sizeof(target->rdma_cm.dst)) == 0) &&
target->initiator_ext == t->initiator_ext) {
ret = false;
break;
@@ -3060,6 +3384,9 @@ out:
*
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
* pkey=<P_Key>,service_id=<service ID>
+ * or
+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
+ * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
*
* to the add_target sysfs attribute.
*/
@@ -3080,11 +3407,20 @@ enum {
SRP_OPT_COMP_VECTOR = 1 << 12,
SRP_OPT_TL_RETRY_COUNT = 1 << 13,
SRP_OPT_QUEUE_SIZE = 1 << 14,
- SRP_OPT_ALL = (SRP_OPT_ID_EXT |
- SRP_OPT_IOC_GUID |
- SRP_OPT_DGID |
- SRP_OPT_PKEY |
- SRP_OPT_SERVICE_ID),
+ SRP_OPT_IP_SRC = 1 << 15,
+ SRP_OPT_IP_DEST = 1 << 16,
+	SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
+};
+
+static unsigned int srp_opt_mandatory[] = {
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_DGID |
+ SRP_OPT_PKEY |
+ SRP_OPT_SERVICE_ID,
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_IP_DEST,
};
static const match_table_t srp_opt_tokens = {
@@ -3095,6 +3431,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_SERVICE_ID, "service_id=%s" },
{ SRP_OPT_MAX_SECT, "max_sect=%d" },
{ SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
+ { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
{ SRP_OPT_IO_CLASS, "io_class=%x" },
{ SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
{ SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
@@ -3103,15 +3440,33 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
{ SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { SRP_OPT_IP_SRC, "src=%s" },
+ { SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_ERR, NULL }
};
-static int srp_parse_options(const char *buf, struct srp_target_port *target)
+static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+ const char *addr_port_str)
+{
+ char *addr = kstrdup(addr_port_str, GFP_KERNEL);
+ char *port_str = addr;
+ int ret;
+
+ if (!addr)
+ return -ENOMEM;
+ strsep(&port_str, ":");
+ ret = inet_pton_with_scope(net, AF_UNSPEC, addr, port_str, sa);
+ kfree(addr);
+ return ret;
+}
+
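A usage sketch for srp_parse_in() above (the address and port are illustrative); note that strsep() splits at the first ':', so the dest= form documented earlier suits IPv4 address:port strings:

static int srp_sketch_parse_dest(struct net *net)
{
	struct sockaddr_storage ss;

	/* On success, ss holds the parsed sockaddr with the port filled in. */
	return srp_parse_in(net, &ss, "10.1.2.3:5555");
}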
+static int srp_parse_options(struct net *net, const char *buf,
+ struct srp_target_port *target)
{
char *options, *sep_opt;
char *p;
- char dgid[3];
substring_t args[MAX_OPT_ARGS];
+ unsigned long long ull;
int opt_mask = 0;
int token;
int ret = -EINVAL;
@@ -3136,7 +3491,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid id_ext parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->id_ext = cpu_to_be64(ull);
kfree(p);
break;
@@ -3146,7 +3507,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid ioc_guid parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ioc_guid = cpu_to_be64(ull);
kfree(p);
break;
@@ -3162,16 +3529,10 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
goto out;
}
- for (i = 0; i < 16; ++i) {
- strlcpy(dgid, p + i * 2, sizeof(dgid));
- if (sscanf(dgid, "%hhx",
- &target->orig_dgid.raw[i]) < 1) {
- ret = -EINVAL;
- kfree(p);
- goto out;
- }
- }
+ ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
kfree(p);
+ if (ret < 0)
+ goto out;
break;
case SRP_OPT_PKEY:
@@ -3179,7 +3540,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("bad P_Key parameter '%s'\n", p);
goto out;
}
- target->pkey = cpu_to_be16(token);
+ target->ib_cm.pkey = cpu_to_be16(token);
break;
case SRP_OPT_SERVICE_ID:
@@ -3188,7 +3549,45 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad service_id parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ib_cm.service_id = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_SRC:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+ if (ret < 0) {
+ pr_warn("bad source parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->rdma_cm.src_specified = true;
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_DEST:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+ if (ret < 0) {
+ pr_warn("bad dest parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->using_rdma_cm = true;
kfree(p);
break;
@@ -3221,6 +3620,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->cmd_per_lun = token;
break;
+ case SRP_OPT_TARGET_CAN_QUEUE:
+ if (match_int(args, &token) || token < 1) {
+				pr_warn("bad target_can_queue parameter '%s'\n",
+					p);
+ goto out;
+ }
+ target->target_can_queue = token;
+ break;
+
case SRP_OPT_IO_CLASS:
if (match_hex(args, &token)) {
pr_warn("bad IO class parameter '%s'\n", p);
@@ -3242,7 +3650,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad initiator_ext value '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->initiator_ext = cpu_to_be64(ull);
kfree(p);
break;
@@ -3297,14 +3711,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
}
}
- if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
- ret = 0;
- else
- for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
- if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
- !(srp_opt_tokens[i].token & opt_mask))
- pr_warn("target creation request is missing parameter '%s'\n",
- srp_opt_tokens[i].pattern);
+ for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
+ if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret)
+ pr_warn("target creation request is missing one or more parameters\n");
if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
&& (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
@@ -3345,6 +3759,7 @@ static ssize_t srp_create_target(struct device *dev,
target = host_to_target(target_host);
+ target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
@@ -3366,18 +3781,29 @@ static ssize_t srp_create_target(struct device *dev,
if (ret < 0)
goto put;
- ret = srp_parse_options(buf, target);
+ ret = srp_parse_options(target->net, buf, target);
if (ret)
goto out;
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
if (!srp_conn_unique(target->srp_host, target)) {
- shost_printk(KERN_INFO, target->scsi_host,
- PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be64_to_cpu(target->initiator_ext));
+ if (target->using_rdma_cm) {
+ char dst_addr[64];
+
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)));
+ } else {
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be64_to_cpu(target->initiator_ext));
+ }
ret = -EEXIST;
goto out;
}
@@ -3478,11 +3904,18 @@ static ssize_t srp_create_target(struct device *dev,
ret = srp_connect_ch(ch, multich);
if (ret) {
+ char dst[64];
+
+ if (target->using_rdma_cm)
+ inet_ntop(&target->rdma_cm.dst, dst,
+ sizeof(dst));
+ else
+ snprintf(dst, sizeof(dst), "%pI6",
+ target->ib_cm.orig_dgid.raw);
shost_printk(KERN_ERR, target->scsi_host,
- PFX "Connection %d/%d to %pI6 failed\n",
+ PFX "Connection %d/%d to %s failed\n",
ch_start + cpu_idx,
- target->ch_count,
- ch->target->orig_dgid.raw);
+ target->ch_count, dst);
if (node_idx == 0 && cpu_idx == 0) {
goto free_ch;
} else {
@@ -3507,13 +3940,25 @@ connected:
goto err_disconnect;
if (target->state != SRP_TARGET_REMOVED) {
- shost_printk(KERN_DEBUG, target->scsi_host, PFX
- "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be16_to_cpu(target->pkey),
- be64_to_cpu(target->service_id),
- target->sgid.raw, target->orig_dgid.raw);
+ if (target->using_rdma_cm) {
+ char dst[64];
+
+ inet_ntop(&target->rdma_cm.dst, dst, sizeof(dst));
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ target->sgid.raw, dst);
+ } else {
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id),
+ target->sgid.raw,
+ target->ib_cm.orig_dgid.raw);
+ }
}
ret = count;
@@ -3523,8 +3968,16 @@ out:
put:
scsi_host_put(target->scsi_host);
- if (ret < 0)
+ if (ret < 0) {
+ /*
+ * If a call to srp_remove_target() has not been scheduled,
+		 * drop the network namespace reference that was obtained
+		 * earlier in this function.
+ */
+ if (target->state != SRP_TARGET_REMOVED)
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
scsi_host_put(target->scsi_host);
+ }
return ret;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index a814f5ef16f9..a2706086b9c7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -45,6 +45,7 @@
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
+#include <rdma/rdma_cm.h>
enum {
SRP_PATH_REC_TIMEOUT_MS = 1000,
@@ -153,11 +154,18 @@ struct srp_rdma_ch {
struct completion done;
int status;
- struct sa_path_rec path;
- struct ib_sa_query *path_query;
- int path_query_id;
+ union {
+ struct ib_cm {
+ struct sa_path_rec path;
+ struct ib_sa_query *path_query;
+ int path_query_id;
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ struct rdma_cm {
+ struct rdma_cm_id *cm_id;
+ } rdma_cm;
+ };
- struct ib_cm_id *cm_id;
struct srp_iu **tx_ring;
struct srp_iu **rx_ring;
struct srp_request *req_ring;
@@ -182,6 +190,7 @@ struct srp_target_port {
/* read only in the hot path */
u32 global_rkey;
struct srp_rdma_ch *ch;
+ struct net *net;
u32 ch_count;
u32 lkey;
enum srp_target_state state;
@@ -194,7 +203,6 @@ struct srp_target_port {
union ib_gid sgid;
__be64 id_ext;
__be64 ioc_guid;
- __be64 service_id;
__be64 initiator_ext;
u16 io_class;
struct srp_host *srp_host;
@@ -203,6 +211,7 @@ struct srp_target_port {
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ unsigned int target_can_queue;
int mr_pool_size;
int mr_per_cmd;
int queue_size;
@@ -210,8 +219,28 @@ struct srp_target_port {
int comp_vector;
int tl_retry_count;
- union ib_gid orig_dgid;
- __be16 pkey;
+ bool using_rdma_cm;
+
+ union {
+ struct {
+ __be64 service_id;
+ union ib_gid orig_dgid;
+ __be16 pkey;
+ } ib_cm;
+ struct {
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } src;
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } dst;
+ bool src_specified;
+ } rdma_cm;
+ };
u32 rq_tmo_jiffies;
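Because both structures now keep their CM state in a union, every access must be gated on using_rdma_cm, as in all of the ib_srp.c changes above; a condensed sketch of the pattern (illustrative helper, not part of the patch):

static void srp_sketch_show_cm_id(struct srp_target_port *target,
				  struct srp_rdma_ch *ch)
{
	/* Never touch a union member of the inactive CM flavor. */
	if (target->using_rdma_cm)
		pr_debug("rdma_cm id %p\n", ch->rdma_cm.cm_id);
	else
		pr_debug("ib_cm id %p\n", ch->ib_cm.cm_id);
}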
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index bfa576aa9f03..0373b7c40902 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
@@ -120,7 +121,9 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
}
/**
- * srpt_event_handler() - Asynchronous IB event callback function.
+ * srpt_event_handler - asynchronous IB event callback function
+ * @handler: IB event handler registered by ib_register_event_handler().
+ * @event: Description of the event that occurred.
*
* Callback function called by the InfiniBand core when an asynchronous IB
* event occurs. This callback may occur in interrupt context. See also
@@ -132,6 +135,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
{
struct srpt_device *sdev;
struct srpt_port *sport;
+ u8 port_num;
sdev = ib_get_client_data(event->device, &srpt_client);
if (!sdev || sdev->device != event->device)
@@ -142,10 +146,15 @@ static void srpt_event_handler(struct ib_event_handler *handler,
switch (event->event) {
case IB_EVENT_PORT_ERR:
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
sport->lid = 0;
sport->sm_lid = 0;
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
case IB_EVENT_PORT_ACTIVE:
@@ -155,25 +164,31 @@ static void srpt_event_handler(struct ib_event_handler *handler,
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
if (!sport->lid && !sport->sm_lid)
schedule_work(&sport->work);
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
default:
- pr_err("received unrecognized IB event %d\n",
- event->event);
+ pr_err("received unrecognized IB event %d\n", event->event);
break;
}
}
/**
- * srpt_srq_event() - SRQ event callback function.
+ * srpt_srq_event - SRQ event callback function
+ * @event: Description of the event that occurred.
+ * @ctx: Context pointer specified at SRQ creation time.
*/
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
- pr_info("SRQ event %d\n", event->event);
+ pr_debug("SRQ event %d\n", event->event);
}
static const char *get_ch_state_name(enum rdma_ch_state s)
@@ -194,16 +209,18 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
}
/**
- * srpt_qp_event() - QP event callback function.
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+ * @ch: SRPT RDMA channel.
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
- event->event, ch->cm_id, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
+ event->event, ch, ch->sess_name, ch->state);
switch (event->event) {
case IB_EVENT_COMM_EST:
- ib_cm_notify(ch->cm_id, event->event);
+ ib_cm_notify(ch->ib_cm.cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
pr_debug("%s-%d, state %s: received Last WQE event.\n",
@@ -217,8 +234,8 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
}
/**
- * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
- *
+ * srpt_set_ioc - initialize an IOUnitInfo structure
+ * @c_list: controller list.
* @slot: one-based slot number.
* @value: four-bit value.
*
@@ -241,7 +258,8 @@ static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
}
/**
- * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
*
* See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
* Specification.
@@ -260,7 +278,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
}
/**
- * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ * srpt_get_iou - write IOUnitInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
*
* See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
* Specification. See also section B.7, table B.6 in the SRP r16a document.
@@ -284,7 +303,10 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
}
/**
- * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ * srpt_get_ioc - write IOControllerprofile to a management datagram
+ * @sport: HCA port through which the MAD has been received.
+ * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
*
* See also section 16.3.3.4 IOControllerProfile in the InfiniBand
* Architecture Specification. See also section B.7, table B.7 in the SRP
@@ -314,7 +336,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
if (sdev->use_srq)
send_queue_depth = sdev->srq_size;
else
- send_queue_depth = min(SRPT_RQ_SIZE,
+ send_queue_depth = min(MAX_SRPT_RQ_SIZE,
sdev->device->attrs.max_qp_wr);
memset(iocp, 0, sizeof(*iocp));
@@ -342,7 +364,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
}
/**
- * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ * srpt_get_svc_entries - write ServiceEntries to a management datagram
+ * @ioc_guid: I/O controller GUID to use in reply.
+ * @slot: I/O controller number.
+ * @hi: End of the range of service entries to be specified in the reply.
+ * @lo: Start of the range of service entries to be specified in the reply.
+ * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
*
* See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
* Specification. See also section B.7, table B.8 in the SRP r16a document.
@@ -379,8 +406,8 @@ static void srpt_get_svc_entries(u64 ioc_guid,
}
/**
- * srpt_mgmt_method_get() - Process a received management datagram.
- * @sp: source port through which the MAD has been received.
+ * srpt_mgmt_method_get - process a received management datagram
+ * @sp: HCA port through which the MAD has been received.
* @rq_mad: received MAD.
* @rsp_mad: response MAD.
*/
@@ -419,7 +446,9 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
}
/**
- * srpt_mad_send_handler() - Post MAD-send callback function.
+ * srpt_mad_send_handler - MAD send completion callback
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @mad_wc: Work completion reporting that the MAD has been sent.
*/
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
@@ -429,7 +458,10 @@ static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
}
/**
- * srpt_mad_recv_handler() - MAD reception callback function.
+ * srpt_mad_recv_handler - MAD reception callback function
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @send_buf: Not used.
+ * @mad_wc: Work completion reporting that a MAD has been received.
*/
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
@@ -493,8 +525,18 @@ err:
ib_free_recv_mad(mad_wc);
}
+static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+{
+ const __be16 *g = (const __be16 *)guid;
+
+ return snprintf(buf, size, "%04x:%04x:%04x:%04x",
+ be16_to_cpu(g[0]), be16_to_cpu(g[1]),
+ be16_to_cpu(g[2]), be16_to_cpu(g[3]));
+}
+
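A usage sketch for srpt_format_guid() above; the "%04x:%04x:%04x:%04x" output needs 20 bytes including the terminator (the helper name below is assumed, not part of the patch):

static void srpt_sketch_log_port_guid(struct srpt_port *sport)
{
	char guid[20];

	srpt_format_guid(guid, sizeof(guid), &sport->gid.global.interface_id);
	pr_debug("port GUID %s\n", guid);
}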
/**
- * srpt_refresh_port() - Configure a HCA port.
+ * srpt_refresh_port - configure a HCA port
+ * @sport: SRPT HCA port.
*
* Enable InfiniBand management datagram processing, update the cached sm_lid,
* lid and gid values, and register a callback function for processing MADs
@@ -507,7 +549,6 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_mad_reg_req reg_req;
struct ib_port_modify port_modify;
struct ib_port_attr port_attr;
- __be16 *guid;
int ret;
memset(&port_modify, 0, sizeof(port_modify));
@@ -531,11 +572,8 @@ static int srpt_refresh_port(struct srpt_port *sport)
goto err_query_port;
sport->port_guid_wwn.priv = sport;
- guid = (__be16 *)&sport->gid.global.interface_id;
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
+ srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ &sport->gid.global.interface_id);
sport->port_gid_wwn.priv = sport;
snprintf(sport->port_gid, sizeof(sport->port_gid),
"0x%016llx%016llx",
@@ -577,7 +615,8 @@ err_mod_port:
}
/**
- * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ * srpt_unregister_mad_agent - unregister MAD callback functions
+ * @sdev: SRPT HCA pointer.
*
* Note: It is safe to call this function more than once for the same device.
*/
@@ -602,7 +641,11 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
}
/**
- * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ * srpt_alloc_ioctx - allocate an SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx_size: I/O context size.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
int ioctx_size, int dma_size,
@@ -633,7 +676,11 @@ err:
}
/**
- * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ * srpt_free_ioctx - free an SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx: I/O context pointer.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
int dma_size, enum dma_data_direction dir)
@@ -647,7 +694,7 @@ static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
}
/**
- * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
* @sdev: Device to allocate the I/O context ring for.
* @ring_size: Number of elements in the I/O context ring.
* @ioctx_size: I/O context size.
@@ -685,7 +732,12 @@ out:
}
/**
- * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
+ * @ioctx_ring: I/O context ring to be freed.
+ * @sdev: SRPT HCA pointer.
+ * @ring_size: Number of ring elements.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
struct srpt_device *sdev, int ring_size,
@@ -702,23 +754,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
}
/**
- * srpt_get_cmd_state() - Get the state of a SCSI command.
- */
-static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
-{
- enum srpt_command_state state;
- unsigned long flags;
-
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- state = ioctx->state;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
- return state;
-}
-
-/**
- * srpt_set_cmd_state() - Set the state of a SCSI command.
+ * srpt_set_cmd_state - set the state of a SCSI command
+ * @ioctx: Send I/O context.
+ * @new: New I/O context state.
*
* Does not modify the state of aborted commands. Returns the previous command
* state.
@@ -727,21 +765,19 @@ static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous != SRPT_STATE_DONE)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
return previous;
}
/**
- * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ * srpt_test_and_set_cmd_state - test and set the state of a command
+ * @ioctx: Send I/O context.
+ * @old: Current I/O context state.
+ * @new: New I/O context state.
*
* Returns true if and only if the previous command state was equal to 'old'.
*/
@@ -750,22 +786,23 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
WARN_ON(!ioctx);
WARN_ON(old == SRPT_STATE_DONE);
WARN_ON(new == SRPT_STATE_NEW);
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous == old)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
return previous == old;
}
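With the per-ioctx spinlock removed, these helpers rely on the completion context serializing all state changes; a usage sketch for the test-and-set helper above (the transition shown is illustrative, not part of the patch):

static void srpt_sketch_start_data_in(struct srpt_send_ioctx *ioctx)
{
	/* Move NEED_DATA -> DATA_IN only if no abort raced this command. */
	if (!srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
					 SRPT_STATE_DATA_IN))
		pr_debug("cmd %lld: state changed concurrently\n",
			 ioctx->cmd.tag);
}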
/**
- * srpt_post_recv() - Post an IB receive request.
+ * srpt_post_recv - post an IB receive request
+ * @sdev: SRPT HCA pointer.
+ * @ch: SRPT RDMA channel.
+ * @ioctx: Receive I/O context pointer.
*/
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
@@ -791,7 +828,8 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
}
/**
- * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ * srpt_zerolength_write - perform a zero-length RDMA write
+ * @ch: SRPT RDMA channel.
*
* A quote from the InfiniBand specification: C9-88: For an HCA responder
* using Reliable Connection service, for each zero-length RDMA READ or WRITE
@@ -802,6 +840,9 @@ static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
struct ib_send_wr wr, *bad_wr;
+ pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ ch->qp->qp_num);
+
memset(&wr, 0, sizeof(wr));
wr.opcode = IB_WR_RDMA_WRITE;
wr.wr_cqe = &ch->zw_cqe;
@@ -813,13 +854,17 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = cq->cq_context;
+ pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
+ wc->status);
+
if (wc->status == IB_WC_SUCCESS) {
srpt_process_wait_list(ch);
} else {
if (srpt_set_ch_state(ch, CH_DISCONNECTED))
schedule_work(&ch->release_work);
else
- WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+ pr_debug("%s-%d: already disconnected.\n",
+ ch->sess_name, ch->qp->qp_num);
}
}
@@ -928,11 +973,13 @@ static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
}
/**
- * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * srpt_get_desc_tbl - parse the data descriptors of an SRP_CMD request
* @ioctx: Pointer to the I/O context associated with the request.
* @srp_cmd: Pointer to the SRP_CMD request data.
* @dir: Pointer to the variable to which the transfer direction will be
* written.
+ * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg_cnt: [out] length of @sg.
* @data_len: Pointer to the variable to which the total data length of all
* descriptors in the SRP_CMD request will be written.
*
@@ -998,7 +1045,9 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
}
/**
- * srpt_init_ch_qp() - Initialize queue pair attributes.
+ * srpt_init_ch_qp - initialize queue pair attributes
+ * @ch: SRPT RDMA channel.
+ * @qp: Queue pair pointer.
*
* Initialized the attributes of queue pair 'qp' by allowing local write,
* remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
@@ -1015,7 +1064,12 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
attr->qp_state = IB_QPS_INIT;
attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
- attr->pkey_index = 0;
+
+ ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
+ ch->pkey, &attr->pkey_index);
+ if (ret < 0)
+ pr_err("Translating pkey %#x failed (%d) - using index 0\n",
+ ch->pkey, ret);
ret = ib_modify_qp(qp, attr,
IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
@@ -1026,7 +1080,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
}
/**
- * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1043,7 +1097,7 @@ static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1056,7 +1110,7 @@ out:
}
/**
- * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1073,7 +1127,7 @@ static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1086,7 +1140,8 @@ out:
}
/**
- * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ * srpt_ch_qp_err - set the channel queue pair state to 'error'
+ * @ch: SRPT RDMA channel.
*/
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
@@ -1097,7 +1152,8 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
}
/**
- * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
+ * @ch: SRPT RDMA channel.
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
@@ -1119,11 +1175,9 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
- spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rdma = 0;
ioctx->n_rw_ctx = 0;
- init_completion(&ioctx->tx_done);
ioctx->queue_status_only = false;
/*
* transport_init_se_cmd() does not initialize all fields, so do it
@@ -1136,14 +1190,12 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
}
/**
- * srpt_abort_cmd() - Abort a SCSI command.
+ * srpt_abort_cmd - abort a SCSI command
* @ioctx: I/O context associated with the SCSI command.
- * @context: Preferred execution context.
*/
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
enum srpt_command_state state;
- unsigned long flags;
BUG_ON(!ioctx);
@@ -1152,7 +1204,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* the ib_srpt driver, change the state to the next state.
*/
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEED_DATA:
@@ -1167,7 +1218,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
__func__, state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
ioctx->state, ioctx->cmd.tag);
@@ -1206,6 +1256,10 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
/**
+ * srpt_rdma_read_done - RDMA read completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
* the data that has been transferred via IB RDMA had to be postponed until the
* check_stop_free() callback. None of this is necessary anymore and needs to
@@ -1233,11 +1287,11 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
target_execute_cmd(&ioctx->cmd);
else
pr_err("%s[%d]: wrong state = %d\n", __func__,
- __LINE__, srpt_get_cmd_state(ioctx));
+ __LINE__, ioctx->state);
}
/**
- * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * srpt_build_cmd_rsp - build an SRP_RSP response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context associated with the SRP_CMD request. The response will
* be built in the buffer ioctx->buf points at and hence this function will
@@ -1297,7 +1351,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
}
/**
- * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * srpt_build_tskmgmt_rsp - build a task management response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context in which the SRP_RSP response will be built.
* @rsp_code: RSP_CODE that will be stored in the response.
@@ -1345,7 +1399,10 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
}
/**
- * srpt_handle_cmd() - Process SRP_CMD.
+ * srpt_handle_cmd - process an SRP_CMD information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*/
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *recv_ioctx,
@@ -1427,7 +1484,10 @@ static int srp_tmr_to_tcm(int fn)
}
/**
- * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ * srpt_handle_tsk_mgmt - process an SRP_TSK_MGMT information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*
* Returns 0 if and only if the request will be processed by the target core.
*
@@ -1449,9 +1509,9 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srp_tsk = recv_ioctx->ioctx.buf;
cmd = &send_ioctx->cmd;
- pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
- " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
- srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
+ srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
+ ch->sess);
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
@@ -1470,41 +1530,42 @@ fail:
}
/**
- * srpt_handle_new_iu() - Process a newly received information unit.
+ * srpt_handle_new_iu - process a newly received information unit
* @ch: RDMA channel through which the information unit has been received.
- * @ioctx: SRPT I/O context associated with the information unit.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
*/
-static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
+static bool
+srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
+ struct srpt_send_ioctx *send_ioctx = NULL;
struct srp_cmd *srp_cmd;
+ bool res = false;
+ u8 opcode;
BUG_ON(!ch);
BUG_ON(!recv_ioctx);
+ if (unlikely(ch->state == CH_CONNECTING))
+ goto push;
+
ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- if (unlikely(ch->state == CH_CONNECTING))
- goto out_wait;
-
- if (unlikely(ch->state != CH_LIVE))
- return;
-
srp_cmd = recv_ioctx->ioctx.buf;
- if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
- if (!send_ioctx) {
- if (!list_empty(&ch->cmd_wait_list))
- goto out_wait;
- send_ioctx = srpt_get_send_ioctx(ch);
- }
+ opcode = srp_cmd->opcode;
+ if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
+ send_ioctx = srpt_get_send_ioctx(ch);
if (unlikely(!send_ioctx))
- goto out_wait;
+ goto push;
}
- switch (srp_cmd->opcode) {
+ if (!list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(!ch->processing_wait_list);
+ list_del_init(&recv_ioctx->wait_list);
+ }
+
+ switch (opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
break;
@@ -1524,16 +1585,22 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
pr_err("Received SRP_RSP\n");
break;
default:
- pr_err("received IU with unknown opcode 0x%x\n",
- srp_cmd->opcode);
+ pr_err("received IU with unknown opcode 0x%x\n", opcode);
break;
}
srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
- return;
+ res = true;
+
+out:
+ return res;
-out_wait:
- list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+push:
+ if (list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(ch->processing_wait_list);
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ }
+ goto out;
}
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1548,10 +1615,10 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
req_lim = atomic_dec_return(&ch->req_lim);
if (unlikely(req_lim < 0))
pr_err("req_lim = %d < 0\n", req_lim);
- srpt_handle_new_iu(ch, ioctx, NULL);
+ srpt_handle_new_iu(ch, ioctx);
} else {
- pr_info("receiving failed for ioctx %p with status %d\n",
- ioctx, wc->status);
+ pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
+ ioctx, wc->status);
}
}
@@ -1562,22 +1629,28 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
*/
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
- struct srpt_send_ioctx *ioctx;
+ struct srpt_recv_ioctx *recv_ioctx, *tmp;
+
+ WARN_ON_ONCE(ch->state == CH_CONNECTING);
- while (!list_empty(&ch->cmd_wait_list) &&
- ch->state >= CH_LIVE &&
- (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
- struct srpt_recv_ioctx *recv_ioctx;
+ if (list_empty(&ch->cmd_wait_list))
+ return;
- recv_ioctx = list_first_entry(&ch->cmd_wait_list,
- struct srpt_recv_ioctx,
- wait_list);
- list_del(&recv_ioctx->wait_list);
- srpt_handle_new_iu(ch, recv_ioctx, ioctx);
+ WARN_ON_ONCE(ch->processing_wait_list);
+ ch->processing_wait_list = true;
+ list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
+ wait_list) {
+ if (!srpt_handle_new_iu(ch, recv_ioctx))
+ break;
}
+ ch->processing_wait_list = false;
}
/**
+ * srpt_send_done - send completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* Note: Although this has not yet been observed during tests, at least in
* theory it is possible that the srpt_get_send_ioctx() call invoked by
* srpt_handle_new_iu() fails. This is possible because the req_lim_delta
@@ -1619,7 +1692,8 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * srpt_create_ch_ib() - Create receive and send completion queues.
+ * srpt_create_ch_ib - create receive and send completion queues
+ * @ch: SRPT RDMA channel.
*/
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
@@ -1627,7 +1701,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
- u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ int sq_size = sport->port_attrib.srp_sq_size;
int i, ret;
WARN_ON(ch->rq_size < 1);
@@ -1638,12 +1712,12 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
goto out;
retry:
- ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
+ ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + srp_sq_size, ret);
+ ch->rq_size + sq_size, ret);
goto out;
}
@@ -1661,8 +1735,8 @@ retry:
* both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
- qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
- qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
+ qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
+ qp_init->cap.max_rdma_ctxs = sq_size / 2;
qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
if (sdev->use_srq) {
@@ -1676,8 +1750,8 @@ retry:
if (IS_ERR(ch->qp)) {
ret = PTR_ERR(ch->qp);
if (ret == -ENOMEM) {
- srp_sq_size /= 2;
- if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+ sq_size /= 2;
+ if (sq_size >= MIN_SRPT_SQ_SIZE) {
ib_destroy_cq(ch->cq);
goto retry;
}
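The retry: path above halves sq_size on -ENOMEM and retries the QP allocation until MIN_SRPT_SQ_SIZE is reached. The shape of that back-off, reduced to a sketch with hypothetical names (try_alloc() and MIN_SIZE are assumptions, not part of this patch):

static int alloc_with_backoff(int size)
{
	int ret;

	for (;;) {
		ret = try_alloc(size);
		if (ret != -ENOMEM)
			return ret;	/* success, or a non-memory error */
		size /= 2;
		if (size < MIN_SIZE)
			return -ENOMEM;	/* give up below the floor */
	}
}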
@@ -1688,9 +1762,9 @@ retry:
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
- pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
__func__, ch->cq->cqe, qp_init->cap.max_send_sge,
- qp_init->cap.max_send_wr, ch->cm_id);
+ qp_init->cap.max_send_wr, ch);
ret = srpt_init_ch_qp(ch, ch->qp);
if (ret)
@@ -1718,7 +1792,8 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
}
/**
- * srpt_close_ch() - Close an RDMA channel.
+ * srpt_close_ch - close an RDMA channel
+ * @ch: SRPT RDMA channel.
*
* Make sure all resources associated with the channel will be deallocated at
* an appropriate time.
@@ -1743,8 +1818,6 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
pr_err("%s-%d: changing queue pair into error state failed: %d\n",
ch->sess_name, ch->qp->qp_num, ret);
- pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
- ch->qp->qp_num);
ret = srpt_zerolength_write(ch);
if (ret < 0) {
pr_err("%s-%d: queuing zero-length write failed: %d\n",
@@ -1776,9 +1849,9 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
return -ENOTCONN;
- ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0)
- ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0 && srpt_close_ch(ch))
ret = 0;
@@ -1786,83 +1859,135 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-/*
- * Send DREQ and wait for DREP. Return true if and only if this function
- * changed the state of @ch.
- */
-static bool srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
- __must_hold(&sdev->mutex)
+static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
{
- DECLARE_COMPLETION_ONSTACK(release_done);
- struct srpt_device *sdev = ch->sport->sdev;
- bool wait;
+ struct srpt_nexus *nexus;
+ struct srpt_rdma_ch *ch2;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (ch2 == ch) {
+ res = false;
+ goto done;
+ }
+ }
+ }
+done:
+ rcu_read_unlock();
- lockdep_assert_held(&sdev->mutex);
+ return res;
+}
+
+/* Send DREQ and wait for DREP. */
+static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
+{
+ struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
- WARN_ON(ch->release_done);
- ch->release_done = &release_done;
- wait = !list_empty(&ch->list);
+ mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
- if (!wait)
- goto out;
-
- while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
+ 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
-out:
- mutex_lock(&sdev->mutex);
- return wait;
}
-static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
- __must_hold(&sdev->mutex)
+static void __srpt_close_all_ch(struct srpt_port *sport)
{
- struct srpt_device *sdev = sport->sdev;
+ struct srpt_nexus *nexus;
struct srpt_rdma_ch *ch;
- lockdep_assert_held(&sdev->mutex);
+ lockdep_assert_held(&sport->mutex);
- if (sport->enabled == enabled)
- return;
- sport->enabled = enabled;
- if (sport->enabled)
- return;
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch) >= 0)
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
+ sport->sdev->device->name, sport->port);
+ srpt_close_ch(ch);
+ }
+ }
+}
-again:
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->sport == sport) {
- pr_info("%s: closing channel %s-%d\n",
- sdev->device->name, ch->sess_name,
- ch->qp->qp_num);
- if (srpt_disconnect_ch_sync(ch))
- goto again;
+/*
+ * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
+ * it does not yet exist.
+ */
+static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
+ const u8 i_port_id[16],
+ const u8 t_port_id[16])
+{
+ struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
+
+ for (;;) {
+ mutex_lock(&sport->mutex);
+ list_for_each_entry(n, &sport->nexus_list, entry) {
+ if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
+ memcmp(n->t_port_id, t_port_id, 16) == 0) {
+ nexus = n;
+ break;
+ }
}
+ if (!nexus && tmp_nexus) {
+ list_add_tail_rcu(&tmp_nexus->entry,
+ &sport->nexus_list);
+ swap(nexus, tmp_nexus);
+ }
+ mutex_unlock(&sport->mutex);
+
+ if (nexus)
+ break;
+ tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
+ if (!tmp_nexus) {
+ nexus = ERR_PTR(-ENOMEM);
+ break;
+ }
+ INIT_LIST_HEAD(&tmp_nexus->ch_list);
+ memcpy(tmp_nexus->i_port_id, i_port_id, 16);
+ memcpy(tmp_nexus->t_port_id, t_port_id, 16);
}
+ kfree(tmp_nexus);
+
+ return nexus;
+}
+
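srpt_get_nexus() above implements a lookup-or-create loop that never allocates while holding sport->mutex. The same pattern in isolation, with hypothetical names (needs <linux/slab.h>, <linux/err.h> and <linux/list.h>; not part of this patch):

struct obj {
	struct list_head entry;
	int key;
};

static struct obj *lookup_or_create(struct mutex *lock, struct list_head *head,
				    int key)
{
	struct obj *found = NULL, *cand = NULL, *o;

	for (;;) {
		mutex_lock(lock);
		list_for_each_entry(o, head, entry) {
			if (o->key == key) {
				found = o;
				break;
			}
		}
		if (!found && cand) {
			list_add_tail(&cand->entry, head);
			swap(found, cand);	/* adopt the candidate */
		}
		mutex_unlock(lock);

		if (found)
			break;
		cand = kzalloc(sizeof(*cand), GFP_KERNEL);	/* lock dropped */
		if (!cand)
			return ERR_PTR(-ENOMEM);
		cand->key = key;
	}
	kfree(cand);	/* NULL, or the candidate that lost the race */
	return found;
}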
+static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
+ __must_hold(&sport->mutex)
+{
+ lockdep_assert_held(&sport->mutex);
+
+ if (sport->enabled == enabled)
+ return;
+ sport->enabled = enabled;
+ if (!enabled)
+ __srpt_close_all_ch(sport);
}
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
- kfree(ch);
+ kfree_rcu(ch, rcu);
}
static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
+ struct srpt_port *sport;
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
- pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
- ch->qp->qp_num, ch->release_done);
+ pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
@@ -1877,169 +2002,141 @@ static void srpt_release_channel_work(struct work_struct *w)
transport_deregister_session(se_sess);
ch->sess = NULL;
- ib_destroy_cm_id(ch->cm_id);
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
sdev, ch->rq_size,
srp_max_req_size, DMA_FROM_DEVICE);
- mutex_lock(&sdev->mutex);
- list_del_init(&ch->list);
- if (ch->release_done)
- complete(ch->release_done);
- mutex_unlock(&sdev->mutex);
+ sport = ch->sport;
+ mutex_lock(&sport->mutex);
+ list_del_rcu(&ch->list);
+ mutex_unlock(&sport->mutex);
- wake_up(&sdev->ch_releaseQ);
+ wake_up(&sport->ch_releaseQ);
kref_put(&ch->kref, srpt_free_ch);
}
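srpt_ch_closed() above walks the nexus and channel lists under rcu_read_lock(), while srpt_release_channel_work() removes entries with list_del_rcu() under sport->mutex and frees them with kfree_rcu(). That reader/writer split, sketched with hypothetical names (not from this patch):

struct item {
	struct list_head list;
	struct rcu_head rcu;
};

static bool reader_sees(struct list_head *head, struct item *needle)
{
	struct item *it;
	bool found = false;

	rcu_read_lock();	/* readers take no sleeping lock */
	list_for_each_entry_rcu(it, head, list) {
		if (it == needle) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void writer_remove(struct mutex *lock, struct item *it)
{
	mutex_lock(lock);	/* writers serialize against each other */
	list_del_rcu(&it->list);
	mutex_unlock(lock);
	kfree_rcu(it, rcu);	/* freed only after a grace period */
}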
/**
- * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
+ * @cm_id: IB/CM connection identifier.
+ * @port_num: Port through which the IB/CM REQ message was received.
+ * @pkey: P_Key of the incoming connection.
+ * @req: SRP login request.
+ * @src_addr: GID of the port that submitted the login request.
*
* Ownership of the cm_id is transferred to the target session if this
* functions returns zero. Otherwise the caller remains the owner of cm_id.
*/
static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
- struct ib_cm_req_event_param *param,
- void *private_data)
+ u8 port_num, __be16 pkey,
+ const struct srp_login_req *req,
+ const char *src_addr)
{
struct srpt_device *sdev = cm_id->context;
- struct srpt_port *sport = &sdev->port[param->port - 1];
- struct srp_login_req *req;
- struct srp_login_rsp *rsp;
- struct srp_login_rej *rej;
- struct ib_cm_rep_param *rep_param;
- struct srpt_rdma_ch *ch, *tmp_ch;
- __be16 *guid;
+ struct srpt_port *sport = &sdev->port[port_num - 1];
+ struct srpt_nexus *nexus;
+ struct srp_login_rsp *rsp = NULL;
+ struct srp_login_rej *rej = NULL;
+ struct ib_cm_rep_param *rep_param = NULL;
+ struct srpt_rdma_ch *ch;
+ char i_port_id[36];
u32 it_iu_len;
- int i, ret = 0;
+ int i, ret;
WARN_ON_ONCE(irqs_disabled());
- if (WARN_ON(!sdev || !private_data))
+ if (WARN_ON(!sdev || !req))
return -EINVAL;
- req = (struct srp_login_req *)private_data;
-
it_iu_len = be32_to_cpu(req->req_it_iu_len);
- pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
- " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
- " (guid=0x%llx:0x%llx)\n",
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
- it_iu_len,
- param->port,
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+ pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
+ req->initiator_port_id, req->target_port_id, it_iu_len,
+ port_num, &sport->gid, be16_to_cpu(pkey));
+ nexus = srpt_get_nexus(sport, req->initiator_port_id,
+ req->target_port_id);
+ if (IS_ERR(nexus)) {
+ ret = PTR_ERR(nexus);
+ goto out;
+ }
+
+ ret = -ENOMEM;
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
rej = kzalloc(sizeof(*rej), GFP_KERNEL);
rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
-
- if (!rsp || !rej || !rep_param) {
- ret = -ENOMEM;
+ if (!rsp || !rej || !rep_param)
goto out;
- }
+ ret = -EINVAL;
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because its"
- " length (%d bytes) is out of range (%d .. %d)\n",
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
it_iu_len, 64, srp_max_req_size);
goto reject;
}
if (!sport->enabled) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because the target port"
- " has not yet been enabled\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
+ sport->sdev->device->name, port_num);
goto reject;
}
- if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
-
- mutex_lock(&sdev->mutex);
-
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
- if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
- && !memcmp(ch->t_port_id, req->target_port_id, 16)
- && param->port == ch->sport->port
- && param->listen_id == ch->sport->sdev->cm_id
- && ch->cm_id) {
- if (srpt_disconnect_ch(ch) < 0)
- continue;
- pr_info("Relogin - closed existing channel %s\n",
- ch->sess_name);
- rsp->rsp_flags =
- SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
- }
- }
-
- mutex_unlock(&sdev->mutex);
-
- } else
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
-
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
- ret = -ENOMEM;
- pr_err("rejected SRP_LOGIN_REQ because it"
- " has an invalid target port identifier.\n");
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
goto reject;
}
+ ret = -ENOMEM;
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
- ret = -ENOMEM;
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
goto reject;
}
kref_init(&ch->kref);
+ ch->pkey = be16_to_cpu(pkey);
+ ch->nexus = nexus;
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
- memcpy(ch->i_port_id, req->initiator_port_id, 16);
- memcpy(ch->t_port_id, req->target_port_id, 16);
- ch->sport = &sdev->port[param->port - 1];
- ch->cm_id = cm_id;
+ ch->sport = sport;
+ ch->ib_cm.cm_id = cm_id;
cm_id->context = ch;
/*
* ch->rq_size should be at least as large as the initiator queue
* depth so that the initiator driver does not have to report QUEUE_FULL
* to the SCSI mid-layer.
*/
- ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
+ ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
- ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+ ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
ch->ioctx_ring = (struct srpt_send_ioctx **)
srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
sizeof(*ch->ioctx_ring[0]),
- ch->rsp_size, DMA_TO_DEVICE);
- if (!ch->ioctx_ring)
+ ch->max_rsp_size, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ch;
+ }
INIT_LIST_HEAD(&ch->free_list);
for (i = 0; i < ch->rq_size; i++) {
@@ -2058,59 +2155,88 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ring;
}
+ for (i = 0; i < ch->rq_size; i++)
+ INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
}
ret = srpt_create_ch_ib(ch);
if (ret) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because creating"
- " a new RDMA channel failed.\n");
- goto free_recv_ring;
- }
-
- ret = srpt_ch_qp_rtr(ch, ch->qp);
- if (ret) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because enabling"
- " RTR failed (error code = %d)\n", ret);
- goto destroy_ib;
+ pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
+ goto free_recv_ring;
}
- guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
- snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
- snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
- be64_to_cpu(*(__be64 *)ch->i_port_id),
- be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+ strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
+ snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)nexus->i_port_id),
+ be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->ini_guid, ch, NULL);
+ ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
- TARGET_PROT_NORMAL, ch->sess_name, ch,
+ TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->sess_name + 2, ch, NULL);
+ i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
- pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
- ch->sess_name);
- rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+ ret = PTR_ERR(ch->sess);
+ pr_info("Rejected login for initiator %s: ret = %d.\n",
+ ch->sess_name, ret);
+ rej->reason = cpu_to_be32(ret == -ENOMEM ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto reject;
+ }
+
+ mutex_lock(&sport->mutex);
+
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ struct srpt_rdma_ch *ch2;
+
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch2) < 0)
+ continue;
+ pr_info("Relogin - closed existing channel %s\n",
+ ch2->sess_name);
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ } else {
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+ }
+
+ list_add_tail_rcu(&ch->list, &nexus->ch_list);
+
+ if (!sport->enabled) {
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
+ sdev->device->name, port_num);
+ mutex_unlock(&sport->mutex);
+ goto reject;
+ }
+
+ mutex_unlock(&sport->mutex);
+
+ ret = srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
+ ret);
goto destroy_ib;
}
- pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
- ch->sess_name, ch->cm_id);
+ pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
+ ch->sess_name, ch);
/* create srp_login_response */
rsp->opcode = SRP_LOGIN_RSP;
@@ -2118,8 +2244,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rsp->max_it_iu_len = req->req_it_iu_len;
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
@@ -2135,25 +2261,31 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rep_param->responder_resources = 4;
rep_param->initiator_depth = 4;
- ret = ib_send_cm_rep(cm_id, rep_param);
- if (ret) {
- pr_err("sending SRP_LOGIN_REQ response failed"
- " (error code = %d)\n", ret);
- goto release_channel;
- }
+ /*
+ * Hold the sport mutex while accepting a connection to prevent
+ * srpt_disconnect_ch() from being invoked concurrently with this code.
+ */
+ mutex_lock(&sport->mutex);
+ if (sport->enabled && ch->state == CH_CONNECTING)
+ ret = ib_send_cm_rep(cm_id, rep_param);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&sport->mutex);
- mutex_lock(&sdev->mutex);
- list_add_tail(&ch->list, &sdev->rch_list);
- mutex_unlock(&sdev->mutex);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ goto reject;
+ default:
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
+ ret);
+ goto reject;
+ }
goto out;
-release_channel:
- srpt_disconnect_ch(ch);
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
- ch->sess = NULL;
-
destroy_ib:
srpt_destroy_ch_ib(ch);
@@ -2165,15 +2297,20 @@ free_recv_ring:
free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
free_ch:
+ cm_id->context = NULL;
kfree(ch);
+ ch = NULL;
+
+ WARN_ON_ONCE(ret == 0);
reject:
+ pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
- rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof(*rej));
@@ -2186,6 +2323,19 @@ out:
return ret;
}
+static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
+ struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ char sguid[40];
+
+ srpt_format_guid(sguid, sizeof(sguid),
+ &param->primary_path->dgid.global.interface_id);
+
+ return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
+ private_data, sguid);
+}
+
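srpt_format_guid() is introduced elsewhere in this patch; a sketch consistent with the "%04x:%04x:%04x:%04x" snprintf() sequence it replaces above (an assumption, not the verbatim implementation):

static void srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
{
	const __be16 *g = (const __be16 *)guid;	/* four 16-bit groups */

	snprintf(buf, size, "%04x:%04x:%04x:%04x",
		 be16_to_cpu(g[0]), be16_to_cpu(g[1]),
		 be16_to_cpu(g[2]), be16_to_cpu(g[3]));
}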
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
enum ib_cm_rej_reason reason,
const u8 *private_data,
@@ -2206,7 +2356,8 @@ static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
}
/**
- * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
+ * @ch: SRPT RDMA channel.
*
* An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use).
@@ -2215,21 +2366,34 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
int ret;
- if (srpt_set_ch_state(ch, CH_LIVE)) {
- ret = srpt_ch_qp_rts(ch, ch->qp);
+ ret = srpt_ch_qp_rts(ch, ch->qp);
+ if (ret < 0) {
+ pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
+ ch->qp->qp_num);
+ srpt_close_ch(ch);
+ return;
+ }
- if (ret == 0) {
- /* Trigger wait list processing. */
- ret = srpt_zerolength_write(ch);
- WARN_ONCE(ret < 0, "%d\n", ret);
- } else {
- srpt_close_ch(ch);
- }
+ /*
+ * Note: calling srpt_close_ch() if the transition to the LIVE state
+ * fails is not necessary, since that means srpt_close_ch() has
+ * already been invoked from another thread.
+ */
+ if (!srpt_set_ch_state(ch, CH_LIVE)) {
+ pr_err("%s-%d: channel transition to LIVE state failed\n",
+ ch->sess_name, ch->qp->qp_num);
+ return;
}
+
+ /* Trigger wait list processing. */
+ ret = srpt_zerolength_write(ch);
+ WARN_ONCE(ret < 0, "%d\n", ret);
}
/**
- * srpt_cm_handler() - IB connection manager callback function.
+ * srpt_cm_handler - IB connection manager callback function
+ * @cm_id: IB/CM connection identifier.
+ * @event: IB/CM event.
*
* A non-zero return value will cause the caller to destroy the CM ID.
*
@@ -2246,8 +2410,8 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
ret = 0;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
- ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
- event->private_data);
+ ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
break;
case IB_CM_REJ_RECEIVED:
srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
@@ -2294,11 +2458,11 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+ return ioctx->state == SRPT_STATE_NEED_DATA;
}
/*
- * srpt_write_pending() - Start data transfer from initiator to target (write).
+ * srpt_write_pending - Start data transfer from initiator to target (write).
*/
static int srpt_write_pending(struct se_cmd *se_cmd)
{
@@ -2355,7 +2519,8 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
}
/**
- * srpt_queue_response() - Transmits the response to a SCSI command.
+ * srpt_queue_response - transmit the response to a SCSI command
+ * @cmd: SCSI target command.
*
* Callback function called by the TCM core. Must not block since it can be
* invoked on the context of the IB completion handler.
@@ -2369,13 +2534,11 @@ static void srpt_queue_response(struct se_cmd *cmd)
struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
struct ib_sge sge;
enum srpt_command_state state;
- unsigned long flags;
int resp_len, ret, i;
u8 srp_tm_status;
BUG_ON(!ch);
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEW:
@@ -2390,7 +2553,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
ch, ioctx->ioctx.index, ioctx->state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
@@ -2494,26 +2656,56 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
+static bool srpt_ch_list_empty(struct srpt_port *sport)
+{
+ struct srpt_nexus *nexus;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry)
+ if (!list_empty(&nexus->ch_list))
+ res = false;
+ rcu_read_unlock();
+
+ return res;
+}
+
/**
- * srpt_release_sdev() - Free the channel resources associated with a target.
+ * srpt_release_sport - disable login and wait for associated channels
+ * @sport: SRPT HCA port.
*/
-static int srpt_release_sdev(struct srpt_device *sdev)
+static int srpt_release_sport(struct srpt_port *sport)
{
- int i, res;
+ struct srpt_nexus *nexus, *next_n;
+ struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
- BUG_ON(!sdev);
-
- mutex_lock(&sdev->mutex);
- for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
- srpt_set_enabled(&sdev->port[i], false);
- mutex_unlock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
+ srpt_set_enabled(sport, false);
+ mutex_unlock(&sport->mutex);
+
+ while (wait_event_timeout(sport->ch_releaseQ,
+ srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for session unregistration ...\n",
+ sport->sdev->device->name, sport->port);
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ pr_info("%s-%d: state %s\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+ }
+ }
+ rcu_read_unlock();
+ }
- res = wait_event_interruptible(sdev->ch_releaseQ,
- list_empty_careful(&sdev->rch_list));
- if (res)
- pr_err("%s: interrupted.\n", __func__);
+ mutex_lock(&sport->mutex);
+ list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
+ list_del(&nexus->entry);
+ kfree_rcu(nexus, rcu);
+ }
+ mutex_unlock(&sport->mutex);
return 0;
}
@@ -2600,8 +2792,10 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
sdev->use_srq = true;
sdev->srq = srq;
- for (i = 0; i < sdev->srq_size; ++i)
+ for (i = 0; i < sdev->srq_size; ++i) {
+ INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+ }
return 0;
}
@@ -2623,7 +2817,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
}
/**
- * srpt_add_one() - Infiniband device addition callback function.
+ * srpt_add_one - InfiniBand device addition callback function
+ * @device: Describes a HCA.
*/
static void srpt_add_one(struct ib_device *device)
{
@@ -2638,9 +2833,7 @@ static void srpt_add_one(struct ib_device *device)
goto err;
sdev->device = device;
- INIT_LIST_HEAD(&sdev->rch_list);
- init_waitqueue_head(&sdev->ch_releaseQ);
- mutex_init(&sdev->mutex);
+ mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(sdev->pd))
@@ -2681,6 +2874,9 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
+ INIT_LIST_HEAD(&sport->nexus_list);
+ init_waitqueue_head(&sport->ch_releaseQ);
+ mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
@@ -2721,7 +2917,9 @@ err:
}
/**
- * srpt_remove_one() - InfiniBand device removal callback function.
+ * srpt_remove_one - InfiniBand device removal callback function
+ * @device: Describes a HCA.
+ * @client_data: The value passed as the third argument to ib_set_client_data().
*/
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
@@ -2751,7 +2949,9 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
spin_lock(&srpt_dev_lock);
list_del(&sdev->list);
spin_unlock(&srpt_dev_lock);
- srpt_release_sdev(sdev);
+
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ srpt_release_sport(&sdev->port[i]);
srpt_free_srq(sdev);
@@ -2827,7 +3027,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
}
/**
- * srpt_close_session() - Forcibly close a session.
+ * srpt_close_session - forcibly close a session
+ * @se_sess: SCSI target session.
*
* Callback function invoked by the TCM core to clean up sessions associated
* with a node ACL when the user invokes
@@ -2836,15 +3037,13 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
static void srpt_close_session(struct se_session *se_sess)
{
struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
- struct srpt_device *sdev = ch->sport->sdev;
- mutex_lock(&sdev->mutex);
srpt_disconnect_ch_sync(ch);
- mutex_unlock(&sdev->mutex);
}
/**
- * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
+ * @se_sess: SCSI target session.
*
* A quote from RFC 4455 (SCSI-MIB) about this MIB object:
* This object represents an arbitrary integer used to uniquely identify a
@@ -2866,7 +3065,7 @@ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx);
+ return ioctx->state;
}
static int srpt_parse_guid(u64 *guid, const char *name)
@@ -2883,7 +3082,7 @@ out:
}
/**
- * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * srpt_parse_i_port_id - parse an initiator port ID
* @name: ASCII representation of a 128-bit initiator port ID.
* @i_port_id: Binary 128-bit port ID.
*/
@@ -3064,18 +3263,24 @@ static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
if (val != !!val)
return -EINVAL;
- ret = mutex_lock_interruptible(&sdev->mutex);
+ ret = mutex_lock_interruptible(&sdev->sdev_mutex);
if (ret < 0)
return ret;
+ ret = mutex_lock_interruptible(&sport->mutex);
+ if (ret < 0)
+ goto unlock_sdev;
enabled = sport->enabled;
/* Log out all initiator systems before changing 'use_srq'. */
srpt_set_enabled(sport, false);
sport->port_attrib.use_srq = val;
srpt_use_srq(sdev, sport->port_attrib.use_srq);
srpt_set_enabled(sport, enabled);
- mutex_unlock(&sdev->mutex);
+ ret = count;
+ mutex_unlock(&sport->mutex);
+unlock_sdev:
+ mutex_unlock(&sdev->sdev_mutex);
- return count;
+ return ret;
}
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
@@ -3104,7 +3309,6 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- struct srpt_device *sdev = sport->sdev;
unsigned long tmp;
int ret;
@@ -3119,9 +3323,9 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
return -EINVAL;
}
- mutex_lock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
srpt_set_enabled(sport, tmp);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
return count;
}
@@ -3134,8 +3338,10 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
};
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @wwn: Corresponds to $driver/$port.
+ * @group: Not used.
+ * @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
struct config_group *group,
@@ -3157,8 +3363,8 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @tpg: Target portal group to deregister.
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
@@ -3169,8 +3375,10 @@ static void srpt_drop_tpg(struct se_portal_group *tpg)
}
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port
+ * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
+ * @tf: Not used.
+ * @group: Not used.
+ * @name: $port.
*/
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
@@ -3180,8 +3388,8 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port
+ * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
+ * @wwn: $port.
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
@@ -3239,7 +3447,7 @@ static const struct target_core_fabric_ops srpt_template = {
};
/**
- * srpt_init_module() - Kernel module initialization.
+ * srpt_init_module - kernel module initialization
*
* Note: Since ib_register_client() registers callback functions, and since at
* least one of these callback functions (srpt_add_one()) calls target core
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 673387d365a3..4d9199fd00dc 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -54,6 +54,8 @@
*/
#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
+struct srpt_nexus;
+
enum {
/*
* SRP IOControllerProfile attributes for SRP target ports that have
@@ -114,7 +116,7 @@ enum {
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
- SRPT_RQ_SIZE = 128,
+ MAX_SRPT_RQ_SIZE = 128,
MIN_SRPT_SRQ_SIZE = 4,
DEFAULT_SRPT_SRQ_SIZE = 4095,
MAX_SRPT_SRQ_SIZE = 65535,
@@ -134,7 +136,7 @@ enum {
};
/**
- * enum srpt_command_state - SCSI command state managed by SRPT.
+ * enum srpt_command_state - SCSI command state managed by SRPT
* @SRPT_STATE_NEW: New command arrived and is being processed.
* @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
* for data arrival.
@@ -158,7 +160,8 @@ enum srpt_command_state {
};
/**
- * struct srpt_ioctx - Shared SRPT I/O context information.
+ * struct srpt_ioctx - shared SRPT I/O context information
+ * @cqe: Completion queue element.
* @buf: Pointer to the buffer.
* @dma: DMA address of the buffer.
* @index: Index of the I/O context in its ioctx_ring array.
@@ -171,7 +174,7 @@ struct srpt_ioctx {
};
/**
- * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * struct srpt_recv_ioctx - SRPT receive I/O context
* @ioctx: See above.
* @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
*/
@@ -187,13 +190,20 @@ struct srpt_rw_ctx {
};
/**
- * struct srpt_send_ioctx - SRPT send I/O context.
+ * struct srpt_send_ioctx - SRPT send I/O context
* @ioctx: See above.
* @ch: Channel pointer.
- * @spinlock: Protects 'state'.
+ * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed.
+ * @rw_ctxs: RDMA read/write contexts.
+ * @rdma_cqe: RDMA completion queue element.
+ * @free_list: Node in srpt_rdma_ch.free_list.
* @state: I/O context state.
* @cmd: Target core command data structure.
* @sense_data: SCSI sense data.
+ * @n_rdma: Number of work requests needed to transfer this ioctx.
+ * @n_rw_ctx: Size of rw_ctxs array.
+ * @queue_status_only: Send a SCSI status back to the initiator but no data.
+ * @sense_data: Sense data to be sent to the initiator.
*/
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
@@ -204,10 +214,8 @@ struct srpt_send_ioctx {
struct ib_cqe rdma_cqe;
struct list_head free_list;
- spinlock_t spinlock;
enum srpt_command_state state;
struct se_cmd cmd;
- struct completion tx_done;
u8 n_rdma;
u8 n_rw_ctx;
bool queue_status_only;
@@ -215,7 +223,7 @@ struct srpt_send_ioctx {
};
/**
- * enum rdma_ch_state - SRP channel state.
+ * enum rdma_ch_state - SRP channel state
* @CH_CONNECTING: QP is in RTR state; waiting for RTU.
* @CH_LIVE: QP is in RTS state.
* @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
@@ -233,17 +241,19 @@ enum rdma_ch_state {
};
/**
- * struct srpt_rdma_ch - RDMA channel.
- * @cm_id: IB CM ID associated with the channel.
+ * struct srpt_rdma_ch - RDMA channel
+ * @nexus: I_T nexus this channel is associated with.
* @qp: IB queue pair used for communicating over this channel.
+ * @cm_id: IB CM ID associated with the channel.
* @cq: IB completion queue for this channel.
+ * @zw_cqe: Zero-length write CQE.
+ * @rcu: RCU head.
+ * @kref: kref for this channel.
* @rq_size: IB receive queue size.
- * @rsp_size IB response message size in bytes.
+ * @max_rsp_size: Maximum size of an RSP response message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
* @sport: pointer to the information of the HCA port used by this
* channel.
- * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
- * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
* @max_ti_iu_len: maximum target-to-initiator information unit length.
* @req_lim: request limit: maximum number of requests that may be sent
* by the initiator without having received a response.
@@ -251,30 +261,34 @@ enum rdma_ch_state {
* @spinlock: Protects free_list and state.
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
+ * @processing_wait_list: Whether or not cmd_wait_list is being processed.
* @ioctx_ring: Send ring.
* @ioctx_recv_ring: Receive I/O context ring.
- * @list: Node for insertion in the srpt_device.rch_list list.
+ * @list: Node in srpt_nexus.ch_list.
* @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
* list contains struct srpt_ioctx elements and is protected
* against concurrent modification by the cm_id spinlock.
+ * @pkey: P_Key of the IB partition for this SRP channel.
* @sess: Session information associated with this SRP channel.
* @sess_name: Session name.
- * @ini_guid: Initiator port GUID.
* @release_work: Allows scheduling of srpt_release_channel().
- * @release_done: Enables waiting for srpt_release_channel() completion.
*/
struct srpt_rdma_ch {
- struct ib_cm_id *cm_id;
+ struct srpt_nexus *nexus;
struct ib_qp *qp;
+ union {
+ struct {
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ };
struct ib_cq *cq;
struct ib_cqe zw_cqe;
+ struct rcu_head rcu;
struct kref kref;
int rq_size;
- u32 rsp_size;
+ u32 max_rsp_size;
atomic_t sq_wr_avail;
struct srpt_port *sport;
- u8 i_port_id[16];
- u8 t_port_id[16];
int max_ti_iu_len;
atomic_t req_lim;
atomic_t req_lim_delta;
@@ -285,15 +299,31 @@ struct srpt_rdma_ch {
struct srpt_recv_ioctx **ioctx_recv_ring;
struct list_head list;
struct list_head cmd_wait_list;
+ uint16_t pkey;
+ bool processing_wait_list;
struct se_session *sess;
- u8 sess_name[36];
- u8 ini_guid[24];
+ u8 sess_name[24];
struct work_struct release_work;
- struct completion *release_done;
};
/**
- * struct srpt_port_attib - Attributes for SRPT port
+ * struct srpt_nexus - I_T nexus
+ * @rcu: RCU head for this data structure.
+ * @entry: srpt_port.nexus_list list node.
+ * @ch_list: struct srpt_rdma_ch list. Protected by srpt_port.mutex.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ */
+struct srpt_nexus {
+ struct rcu_head rcu;
+ struct list_head entry;
+ struct list_head ch_list;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+};
+
+/**
+ * struct srpt_port_attrib - attributes for SRPT port
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
* @srp_sq_size: Send queue (SQ) size.
@@ -307,7 +337,7 @@ struct srpt_port_attrib {
};
/**
- * struct srpt_port - Information associated by SRPT with a single IB port.
+ * struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -323,7 +353,10 @@ struct srpt_port_attrib {
* @port_guid_wwn: WWN associated with target port GUID.
* @port_gid_tpg: TPG associated with target port GID.
* @port_gid_wwn: WWN associated with target port GID.
- * @port_acl_list: Head of the list with all node ACLs for this port.
+ * @port_attrib: Port attributes that can be accessed through configfs.
+ * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @mutex: Protects nexus_list.
+ * @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
struct srpt_port {
struct srpt_device *sdev;
@@ -341,21 +374,22 @@ struct srpt_port {
struct se_portal_group port_gid_tpg;
struct se_wwn port_gid_wwn;
struct srpt_port_attrib port_attrib;
+ wait_queue_head_t ch_releaseQ;
+ struct mutex mutex;
+ struct list_head nexus_list;
};
/**
- * struct srpt_device - Information associated by SRPT with a single HCA.
+ * struct srpt_device - information associated by SRPT with a single HCA
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
* @srq: Per-HCA SRQ (shared receive queue).
* @cm_id: Connection identifier.
* @srq_size: SRQ size.
+ * @sdev_mutex: Serializes use_srq changes.
* @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Per-HCA receive I/O context ring used by the SRQ.
- * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
- * @ch_releaseQ: Enables waiting for removal from rch_list.
- * @mutex: Protects rch_list.
* @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
@@ -367,11 +401,9 @@ struct srpt_device {
struct ib_srq *srq;
struct ib_cm_id *cm_id;
int srq_size;
+ struct mutex sdev_mutex;
bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
- struct list_head rch_list;
- wait_queue_head_t ch_releaseQ;
- struct mutex mutex;
struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 925571475005..0193dd4f0452 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -635,11 +635,11 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int evdev_poll(struct file *file, poll_table *wait)
+static __poll_t evdev_poll(struct file *file, poll_table *wait)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &evdev->wait, wait);
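The __poll_t conversions in this and the following hunks only change the declared return and mask types; the handler logic is untouched. A minimal ->poll() method of this shape, with a hypothetical device (example_dev and example_data_ready() are assumptions, not from this patch):

struct example_dev {
	wait_queue_head_t waitq;
	/* ... */
};

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	struct example_dev *dev = file->private_data;	/* hypothetical */
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);
	if (example_data_ready(dev))	/* hypothetical helper */
		mask |= POLLIN | POLLRDNORM;
	return mask;
}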
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e30642db50d5..0d0b2ab1bb6b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1048,7 +1048,7 @@ static inline void input_wakeup_procfs_readers(void)
wake_up(&input_devices_poll_wait);
}
-static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
+static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &input_devices_poll_wait, wait);
if (file->f_version != input_devices_state) {
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 7b29a8944039..fe3255572886 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -436,7 +436,7 @@ static ssize_t joydev_read(struct file *file, char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int joydev_poll(struct file *file, poll_table *wait)
+static __poll_t joydev_poll(struct file *file, poll_table *wait)
{
struct joydev_client *client = file->private_data;
struct joydev *joydev = client->joydev;
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 1c8c56efc995..9c3f7ec3bd3d 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -408,7 +408,7 @@ static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
return retval;
}
-static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait)
+static __poll_t hp_sdc_rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 39ddd9a73feb..91df0df15e68 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -694,7 +694,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
return retval;
}
-static unsigned int uinput_poll(struct file *file, poll_table *wait)
+static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
struct uinput_device *udev = file->private_data;
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 2d7f691ec71c..731d84ae5101 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -757,11 +757,11 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int mousedev_poll(struct file *file, poll_table *wait)
+static __poll_t mousedev_poll(struct file *file, poll_table *wait)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &mousedev->wait, wait);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 516f9fe77a17..fccf55a380b2 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -239,11 +239,11 @@ out:
return retval;
}
-static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
+static __poll_t serio_raw_poll(struct file *file, poll_table *wait)
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &serio_raw->wait, wait);
diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c
index df1fd41860ac..a63de06b08bc 100644
--- a/drivers/input/serio/userio.c
+++ b/drivers/input/serio/userio.c
@@ -248,7 +248,7 @@ out:
return error ?: count;
}
-static unsigned int userio_char_poll(struct file *file, poll_table *wait)
+static __poll_t userio_char_poll(struct file *file, poll_table *wait)
{
struct userio_device *userio = file->private_data;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4a2de34895ec..a1373cf34326 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4808,7 +4808,7 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
swiotlb = 0;
#endif
dma_ops = &intel_dma_ops;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c70476b34a53..d913aec85109 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -343,4 +343,12 @@ config MESON_IRQ_GPIO
help
Support Meson SoC Family GPIO Interrupt Multiplexer
+config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable the Goldfish interrupt controller driver
+ used by Goldfish-based virtual platforms.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d2df34a54d38..d27e3e3619e0 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
+obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 667b9e14b032..dfe4a460340b 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -98,13 +98,35 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
};
-static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
-{
- int irq = irq_create_mapping(intc.domain, hwirq);
+static int bcm2836_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ switch (hw) {
+ case LOCAL_IRQ_CNTPSIRQ:
+ case LOCAL_IRQ_CNTPNSIRQ:
+ case LOCAL_IRQ_CNTHPIRQ:
+ case LOCAL_IRQ_CNTVIRQ:
+ chip = &bcm2836_arm_irqchip_timer;
+ break;
+ case LOCAL_IRQ_GPU_FAST:
+ chip = &bcm2836_arm_irqchip_gpu;
+ break;
+ case LOCAL_IRQ_PMU_FAST:
+ chip = &bcm2836_arm_irqchip_pmu;
+ break;
+ default:
+ pr_warn_once("Unexpected hw irq: %lu\n", hw);
+ return -EINVAL;
+ }
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ return 0;
}
static void
@@ -165,7 +187,8 @@ static int bcm2836_cpu_dying(unsigned int cpu)
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
- .xlate = irq_domain_xlate_onecell
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = bcm2836_map,
};
static void
@@ -218,19 +241,6 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
- &bcm2836_arm_irqchip_gpu);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
- &bcm2836_arm_irqchip_pmu);
-
bcm2836_arm_irqchip_smp_init();
set_handle_irq(bcm2836_arm_irqchip_handle_irq);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b56c3e23f0af..a57c0fbbd34a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1070,31 +1070,6 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
return 0;
}
-static int get_cpu_number(struct device_node *dn)
-{
- const __be32 *cell;
- u64 hwid;
- int cpu;
-
- cell = of_get_property(dn, "reg", NULL);
- if (!cell)
- return -1;
-
- hwid = of_read_number(cell, of_n_addr_cells(dn));
-
- /*
- * Non affinity bits must be set to 0 in the DT
- */
- if (hwid & ~MPIDR_HWID_BITMASK)
- return -1;
-
- for_each_possible_cpu(cpu)
- if (cpu_logical_map(cpu) == hwid)
- return cpu;
-
- return -1;
-}
-
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
@@ -1145,8 +1120,8 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (WARN_ON(!cpu_node))
continue;
- cpu = get_cpu_number(cpu_node);
- if (WARN_ON(cpu == -1))
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (WARN_ON(cpu < 0))
continue;
pr_cont("%pOF[%d] ", cpu_node, cpu);
@@ -1331,6 +1306,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
+ /* A GICC entry with !ACPI_MADT_ENABLED is not usable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
@@ -1380,6 +1359,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
return 0;
+ /*
+ * It is perfectly valid for firmware to pass a disabled GICC entry;
+ * the driver should not treat that as an error. Skip the entry
+ * instead of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
return -ENODEV;
}
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 000000000000..2a92f03c73e4
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS 32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE 8
+
+#define GFPIC_REG_IRQ_PENDING 0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL 0x08
+#define GFPIC_REG_IRQ_DISABLE 0x0c
+#define GFPIC_REG_IRQ_ENABLE 0x10
+
+struct goldfish_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+};
+
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+ struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct goldfish_pic_data *gfpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+ if (!gfpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gfpic->base = of_iomap(of_node, 0);
+ if (!gfpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ /* Mask interrupts. */
+ writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
+ gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+ handle_level_irq);
+ if (!gc) {
+ pr_err("Failed to allocate chip structures!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ct = gc->chip_types;
+ ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+ ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+ gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0,
+ &goldfish_irq_domain_ops,
+ NULL);
+ if (!gfpic->irq_domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_destroy_generic_chip;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ goldfish_pic_cascade, gfpic);
+
+ pr_info("Successfully registered.\n");
+ return 0;
+
+out_destroy_generic_chip:
+ irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+out_iounmap:
+ iounmap(gfpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(gfpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
index cf6d0c455518..e66ef4373b1e 100644
--- a/drivers/irqchip/irq-ompic.c
+++ b/drivers/irqchip/irq-ompic.c
@@ -171,9 +171,9 @@ static int __init ompic_of_init(struct device_node *node,
/* Setup the device */
ompic_base = ioremap(res.start, resource_size(&res));
- if (IS_ERR(ompic_base)) {
+ if (!ompic_base) {
pr_err("ompic: unable to map registers");
- return PTR_ERR(ompic_base);
+ return -ENOMEM;
}
irq = irq_of_parse_and_map(node, 0);
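The fix above follows the kernel convention that plain ioremap() reports failure with NULL, while the resource-managed devm_ioremap_resource() reports it with an ERR_PTR(); checking one with the other's idiom, as the old code did, lets failures slip through. A condensed sketch of both patterns, assuming a hypothetical probe context with res and pdev in scope:

	void __iomem *base;

	base = ioremap(res.start, resource_size(&res));
	if (!base)			/* ioremap(): NULL on failure */
		return -ENOMEM;

	base = devm_ioremap_resource(&pdev->dev, &res);
	if (IS_ERR(base))		/* devm variant: ERR_PTR() on failure */
		return PTR_ERR(base);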
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index dde8f46bc254..e268811dc544 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -724,11 +724,11 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
return count;
}
-static unsigned int
+static __poll_t
capi_poll(struct file *file, poll_table *wait)
{
struct capidev *cdev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!cdev->ap.applid)
return POLLERR;
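__poll_t is a sparse-annotated bitwise type, so the conversions in this series change only the declared types, not behavior; once a driver is converted, mixing the mask with plain unsigned int can be caught by static checkers. A minimal poll method in the converted style, with my_dev, data_ready() and space_available() as hypothetical stand-ins:

static __poll_t my_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);
	if (data_ready(dev))
		mask |= POLLIN | POLLRDNORM;
	if (space_available(dev))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}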
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 1c5dc345e7c5..34b7704042a4 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -119,10 +119,10 @@ isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_
/***************************************/
/* select routines for various kernels */
/***************************************/
-static unsigned int
+static __poll_t
isdn_divert_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &(rd_queue), wait);
/* mask = POLLOUT | POLLWRNORM; */
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index 72e58bf07577..70f16102a001 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -98,9 +98,9 @@ void diva_os_get_time(dword *sec, dword *usec)
/*
* device node operations
*/
-static unsigned int maint_poll(struct file *file, poll_table *wait)
+static __poll_t maint_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &msgwaitq, wait);
mask = POLLOUT | POLLWRNORM;
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 0033d74a7291..da5cc5ab7e2d 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -74,7 +74,7 @@ static ssize_t um_idi_read(struct file *file, char __user *buf, size_t count,
loff_t *offset);
static ssize_t um_idi_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset);
-static unsigned int um_idi_poll(struct file *file, poll_table *wait);
+static __poll_t um_idi_poll(struct file *file, poll_table *wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
@@ -365,7 +365,7 @@ um_idi_write(struct file *file, const char __user *buf, size_t count,
return (ret);
}
-static unsigned int um_idi_poll(struct file *file, poll_table *wait)
+static __poll_t um_idi_poll(struct file *file, poll_table *wait)
{
diva_um_idi_os_context_t *p_os;
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b2023e08dcd2..fbc788e6f0db 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -650,7 +650,7 @@ static ssize_t divas_read(struct file *file, char __user *buf,
return (ret);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
if (!file->private_data) {
return (POLLERR);
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index b57efd6ad916..3478f6f099eb 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -99,7 +99,7 @@ divas_write(struct file *file, const char __user *buf, size_t count, loff_t *off
return (-ENODEV);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
return (POLLERR);
}
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index aaca0b3d662e..6abea6915f49 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -281,10 +281,10 @@ hysdn_log_close(struct inode *ino, struct file *filep)
/*************************************************/
/* select/poll routine to be able using select() */
/*************************************************/
-static unsigned int
+static __poll_t
hysdn_log_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
hysdn_card *card = PDE_DATA(file_inode(file));
struct procdata *pd = card->proclog;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 8b03d618185e..0521c32949d4 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1227,10 +1227,10 @@ out:
return retval;
}
-static unsigned int
+static __poll_t
isdn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned int minor = iminor(file_inode(file));
int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index e07aefb9151d..57884319b4b1 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -685,10 +685,10 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
-unsigned int
+__poll_t
isdn_ppp_poll(struct file *file, poll_table *wait)
{
- u_int mask;
+ __poll_t mask;
struct ippp_buf_queue *bf, *bl;
u_long flags;
struct ippp_struct *is;
diff --git a/drivers/isdn/i4l/isdn_ppp.h b/drivers/isdn/i4l/isdn_ppp.h
index 4e9b8935a4eb..34b8a2ce84f3 100644
--- a/drivers/isdn/i4l/isdn_ppp.h
+++ b/drivers/isdn/i4l/isdn_ppp.h
@@ -23,7 +23,7 @@ extern int isdn_ppp_autodial_filter(struct sk_buff *, isdn_net_local *);
extern int isdn_ppp_xmit(struct sk_buff *, struct net_device *);
extern void isdn_ppp_receive(isdn_net_dev *, isdn_net_local *, struct sk_buff *);
extern int isdn_ppp_dev_ioctl(struct net_device *, struct ifreq *, int);
-extern unsigned int isdn_ppp_poll(struct file *, struct poll_table_struct *);
+extern __poll_t isdn_ppp_poll(struct file *, struct poll_table_struct *);
extern int isdn_ppp_ioctl(int, struct file *, unsigned int, unsigned long);
extern void isdn_ppp_release(int, struct file *);
extern int isdn_ppp_dial_slave(char *);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index e3654782a3e2..21d50e4cc5e1 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -645,8 +645,10 @@ l1oip_socket_thread(void *data)
{
struct l1oip *hc = (struct l1oip *)data;
int ret = 0;
- struct msghdr msg;
struct sockaddr_in sin_rx;
+ struct kvec iov;
+ struct msghdr msg = {.msg_name = &sin_rx,
+ .msg_namelen = sizeof(sin_rx)};
unsigned char *recvbuf;
size_t recvbuf_size = 1500;
int recvlen;
@@ -661,6 +663,9 @@ l1oip_socket_thread(void *data)
goto fail;
}
+ iov.iov_base = recvbuf;
+ iov.iov_len = recvbuf_size;
+
/* make daemon */
allow_signal(SIGTERM);
@@ -697,12 +702,6 @@ l1oip_socket_thread(void *data)
goto fail;
}
- /* build receive message */
- msg.msg_name = &sin_rx;
- msg.msg_namelen = sizeof(sin_rx);
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
-
/* build send message */
hc->sendmsg.msg_name = &hc->sin_remote;
hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
@@ -719,12 +718,9 @@ l1oip_socket_thread(void *data)
printk(KERN_DEBUG "%s: socket created and open\n",
__func__);
while (!signal_pending(current)) {
- struct kvec iov = {
- .iov_base = recvbuf,
- .iov_len = recvbuf_size,
- };
- recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
- recvbuf_size, 0);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1,
+ recvbuf_size);
+ recvlen = sock_recvmsg(socket, &msg, 0);
if (recvlen > 0) {
l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
} else {
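The rework above hinges on one detail: sock_recvmsg() consumes msg.msg_iter, so the iterator has to be re-primed with iov_iter_kvec() on every pass even though the kvec itself is filled in once, outside the loop; kernel_recvmsg() used to rebuild it internally on each call. A reduced sketch of the resulting receive loop, using the same iov_iter_kvec() signature as this kernel and a hypothetical process() consumer:

	struct kvec iov = { .iov_base = buf, .iov_len = buflen };
	struct msghdr msg = { .msg_name = &sin, .msg_namelen = sizeof(sin) };
	int len;

	while (!signal_pending(current)) {
		/* each receive consumes the iterator, so reset it first */
		iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, buflen);
		len = sock_recvmsg(sock, &msg, 0);
		if (len > 0)
			process(buf, len);
	}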
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c50a34340f67..f4272d4e0a26 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -141,11 +141,11 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
return ret;
}
-static unsigned int
+static __poll_t
mISDN_poll(struct file *filep, poll_table *wait)
{
struct mISDNtimerdev *dev = filep->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 318a28fd58fe..3e763d2a0cb3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -137,6 +137,13 @@ config LEDS_LM3642
converter plus 1.5A constant current driver for a high-current
white LED.
+config LEDS_LM3692X
+ tristate "LED support for LM3692x Chips"
+ depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
+ help
+ This option enables support for the TI LM3692x family
+ of white LED string drivers used for backlighting.
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
@@ -347,7 +354,7 @@ config LEDS_LP8788
config LEDS_LP8860
tristate "LED support for the TI LP8860 4 channel LED driver"
- depends on LEDS_CLASS && I2C
+ depends on LEDS_CLASS && I2C && OF
select REGMAP_I2C
help
If you say yes here you get support for the TI LP8860 4 channel
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a2a6b5a4f86d..987884a5b9a5 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o
+obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index 9a257f969300..f883616d9e60 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -360,7 +360,8 @@ static int as3645a_set_flash_brightness(struct led_classdev_flash *fled,
{
struct as3645a *flash = fled_to_as3645a(fled);
- flash->flash_current = as3645a_current_to_reg(flash, true, brightness_ua);
+ flash->flash_current = as3645a_current_to_reg(flash, true,
+ brightness_ua);
return as3645a_set_current(flash);
}
@@ -455,8 +456,8 @@ static int as3645a_detect(struct as3645a *flash)
/* Verify the chip model and version. */
if (model != 0x01 || rfu != 0x00) {
- dev_err(dev, "AS3645A not detected "
- "(model %d rfu %d)\n", model, rfu);
+ dev_err(dev, "AS3645A not detected (model %d rfu %d)\n",
+ model, rfu);
return -ENODEV;
}
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index d03ed6b4176b..851c1920b63c 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -549,8 +549,12 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
/* make sure the blinkM is balanced (read/writes) */
while (count > 0) {
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
if (tmpargs[0] == 0x09)
count = 0;
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
new file mode 100644
index 000000000000..437173d1712c
--- /dev/null
+++ b/drivers/leds/leds-lm3692x.c
@@ -0,0 +1,393 @@
+/*
+ * TI lm3692x LED Driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Data sheet is located
+ * http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+#define LM3692X_REV 0x0
+#define LM3692X_RESET 0x1
+#define LM3692X_EN 0x10
+#define LM3692X_BRT_CTRL 0x11
+#define LM3692X_PWM_CTRL 0x12
+#define LM3692X_BOOST_CTRL 0x13
+#define LM3692X_AUTO_FREQ_HI 0x15
+#define LM3692X_AUTO_FREQ_LO 0x16
+#define LM3692X_BL_ADJ_THRESH 0x17
+#define LM3692X_BRT_LSB 0x18
+#define LM3692X_BRT_MSB 0x19
+#define LM3692X_FAULT_CTRL 0x1e
+#define LM3692X_FAULT_FLAGS 0x1f
+
+#define LM3692X_SW_RESET BIT(0)
+#define LM3692X_DEVICE_EN BIT(0)
+#define LM3692X_LED1_EN BIT(1)
+#define LM3692X_LED2_EN BIT(2)
+
+/* Brightness Control Bits */
+#define LM3692X_BL_ADJ_POL BIT(0)
+#define LM3692X_RAMP_RATE_125us 0x00
+#define LM3692X_RAMP_RATE_250us BIT(1)
+#define LM3692X_RAMP_RATE_500us BIT(2)
+#define LM3692X_RAMP_RATE_1ms (BIT(1) | BIT(2))
+#define LM3692X_RAMP_RATE_2ms BIT(3)
+#define LM3692X_RAMP_RATE_4ms (BIT(3) | BIT(1))
+#define LM3692X_RAMP_RATE_8ms (BIT(2) | BIT(3))
+#define LM3692X_RAMP_RATE_16ms (BIT(1) | BIT(2) | BIT(3))
+#define LM3692X_RAMP_EN BIT(4)
+#define LM3692X_BRHT_MODE_REG 0x00
+#define LM3692X_BRHT_MODE_PWM BIT(5)
+#define LM3692X_BRHT_MODE_MULTI_RAMP BIT(6)
+#define LM3692X_BRHT_MODE_RAMP_MULTI (BIT(5) | BIT(6))
+#define LM3692X_MAP_MODE_EXP BIT(7)
+
+/* PWM Register Bits */
+#define LM3692X_PWM_FILTER_100 BIT(0)
+#define LM3692X_PWM_FILTER_150 BIT(1)
+#define LM3692X_PWM_FILTER_200 (BIT(0) | BIT(1))
+#define LM3692X_PWM_HYSTER_1LSB BIT(2)
+#define LM3692X_PWM_HYSTER_2LSB BIT(3)
+#define LM3692X_PWM_HYSTER_3LSB (BIT(3) | BIT(2))
+#define LM3692X_PWM_HYSTER_4LSB BIT(4)
+#define LM3692X_PWM_HYSTER_5LSB (BIT(4) | BIT(2))
+#define LM3692X_PWM_HYSTER_6LSB (BIT(4) | BIT(3))
+#define LM3692X_PWM_POLARITY BIT(5)
+#define LM3692X_PWM_SAMP_4MHZ BIT(6)
+#define LM3692X_PWM_SAMP_24MHZ BIT(7)
+
+/* Boost Control Bits */
+#define LM3692X_OCP_PROT_1A BIT(0)
+#define LM3692X_OCP_PROT_1_25A BIT(1)
+#define LM3692X_OCP_PROT_1_5A (BIT(0) | BIT(1))
+#define LM3692X_OVP_21V BIT(2)
+#define LM3692X_OVP_25V BIT(3)
+#define LM3692X_OVP_29V (BIT(2) | BIT(3))
+#define LM3692X_MIN_IND_22UH BIT(4)
+#define LM3692X_BOOST_SW_1MHZ BIT(5)
+#define LM3692X_BOOST_SW_NO_SHIFT BIT(6)
+
+/* Fault Control Bits */
+#define LM3692X_FAULT_CTRL_OVP BIT(0)
+#define LM3692X_FAULT_CTRL_OCP BIT(1)
+#define LM3692X_FAULT_CTRL_TSD BIT(2)
+#define LM3692X_FAULT_CTRL_OPEN BIT(3)
+
+/* Fault Flag Bits */
+#define LM3692X_FAULT_FLAG_OVP BIT(0)
+#define LM3692X_FAULT_FLAG_OCP BIT(1)
+#define LM3692X_FAULT_FLAG_TSD BIT(2)
+#define LM3692X_FAULT_FLAG_SHRT BIT(3)
+#define LM3692X_FAULT_FLAG_OPEN BIT(4)
+
+/**
+ * struct lm3692x_led -
+ * @lock - Lock for reading/writing the device
+ * @client - Pointer to the I2C client
+ * @led_dev - LED class device pointer
+ * @regmap - Devices register map
+ * @enable_gpio - VDDIO/EN gpio to enable communication interface
+ * @regulator - LED supply regulator pointer
+ * @label - LED label
+ */
+struct lm3692x_led {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ char label[LED_MAX_NAME_SIZE];
+};
+
+static const struct reg_default lm3692x_reg_defs[] = {
+ {LM3692X_EN, 0xf},
+ {LM3692X_BRT_CTRL, 0x61},
+ {LM3692X_PWM_CTRL, 0x73},
+ {LM3692X_BOOST_CTRL, 0x6f},
+ {LM3692X_AUTO_FREQ_HI, 0x0},
+ {LM3692X_AUTO_FREQ_LO, 0x0},
+ {LM3692X_BL_ADJ_THRESH, 0x0},
+ {LM3692X_BRT_LSB, 0x7},
+ {LM3692X_BRT_MSB, 0xff},
+ {LM3692X_FAULT_CTRL, 0x7},
+};
+
+static const struct regmap_config lm3692x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LM3692X_FAULT_FLAGS,
+ .reg_defaults = lm3692x_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lm3692x_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int lm3692x_fault_check(struct lm3692x_led *led)
+{
+ int ret;
+ unsigned int read_buf;
+
+ ret = regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (ret)
+ return ret;
+
+ if (read_buf)
+ dev_err(&led->client->dev, "Detected a fault 0x%X\n", read_buf);
+
+ /* The first read may clear the fault. Check again to see if the fault
+ * still exists and return that value.
+ */
+ regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (read_buf)
+ dev_err(&led->client->dev, "Second read of fault flags 0x%X\n",
+ read_buf);
+
+ return read_buf;
+}
+
+static int lm3692x_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm3692x_led *led =
+ container_of(led_cdev, struct lm3692x_led, led_dev);
+ int ret;
+ int led_brightness_lsb = (brt_val >> 5);
+
+ mutex_lock(&led->lock);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write MSB\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write LSB\n");
+ goto out;
+ }
+out:
+ mutex_unlock(&led->lock);
+ return ret;
+}
+
+static int lm3692x_init(struct lm3692x_led *led)
+{
+ int ret;
+
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 1);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL, 0x00);
+ if (ret)
+ goto out;
+
+ /*
+ * For glitch-free operation, the following registers should
+ * only be written while the device enable bit is 0,
+ * per Section 7.5.14 of the data sheet.
+ */
+ ret = regmap_write(led->regmap, LM3692X_PWM_CTRL,
+ LM3692X_PWM_FILTER_100 | LM3692X_PWM_SAMP_24MHZ);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
+ LM3692X_BRHT_MODE_RAMP_MULTI |
+ LM3692X_BL_ADJ_POL |
+ LM3692X_RAMP_RATE_250us);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_HI, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_LO, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BL_ADJ_THRESH, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL,
+ LM3692X_BL_ADJ_POL | LM3692X_PWM_HYSTER_4LSB);
+ if (ret)
+ goto out;
+
+ return ret;
+out:
+ dev_err(&led->client->dev, "Fail writing initialization values\n");
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ return ret;
+}
+
+static int lm3692x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lm3692x_led *led;
+ struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::backlight_cluster", id->name);
+ }
+
+ led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(led->enable_gpio)) {
+ ret = PTR_ERR(led->enable_gpio);
+ dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ return ret;
+ }
+
+ led->regulator = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(led->regulator))
+ led->regulator = NULL;
+
+ led->client = client;
+ led->led_dev.name = led->label;
+ led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
+
+ mutex_init(&led->lock);
+
+ i2c_set_clientdata(client, led);
+
+ led->regmap = devm_regmap_init_i2c(client, &lm3692x_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lm3692x_init(led);
+ if (ret)
+ return ret;
+
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lm3692x_remove(struct i2c_client *client)
+{
+ struct lm3692x_led *led = i2c_get_clientdata(client);
+ int ret;
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ mutex_destroy(&led->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3692x_id[] = {
+ { "lm36922", 0 },
+ { "lm36923", 1 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm3692x_id);
+
+static const struct of_device_id of_lm3692x_leds_match[] = {
+ { .compatible = "ti,lm36922", },
+ { .compatible = "ti,lm36923", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lm3692x_leds_match);
+
+static struct i2c_driver lm3692x_driver = {
+ .driver = {
+ .name = "lm3692x",
+ .of_match_table = of_lm3692x_leds_match,
+ },
+ .probe = lm3692x_probe,
+ .remove = lm3692x_remove,
+ .id_table = lm3692x_id,
+};
+module_i2c_driver(lm3692x_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LM3692X LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
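One thing worth noting about the regmap setup in this driver: with cache_type set, regmap serves reads of registers it considers non-volatile from the cache, while status registers such as LM3692X_FAULT_FLAGS are normally flagged volatile so each read reaches the hardware, which the double read in lm3692x_fault_check() relies on. A sketch of how that could be expressed, as a hypothetical addition layered on the config above rather than something this patch does:

/* Hypothetical: make fault-flag reads bypass the regcache. */
static bool lm3692x_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == LM3692X_FAULT_FLAGS;	/* status: never cache */
}

static const struct regmap_config lm3692x_regmap_config_volatile = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = LM3692X_FAULT_FLAGS,
	.volatile_reg = lm3692x_volatile_reg,
	.reg_defaults = lm3692x_reg_defs,
	.num_reg_defaults = ARRAY_SIZE(lm3692x_reg_defs),
	.cache_type = REGCACHE_RBTREE,
};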
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 3e70775a2d54..39c72a908f3b 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -22,6 +22,7 @@
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
#define LP8860_DISP_CL1_BRT_MSB 0x00
#define LP8860_DISP_CL1_BRT_LSB 0x01
@@ -86,8 +87,6 @@
#define LP8860_CLEAR_FAULTS 0x01
-#define LP8860_DISP_LED_NAME "display_cluster"
-
/**
* struct lp8860_led -
* @lock - Lock for reading/writing the device
@@ -98,7 +97,7 @@
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
* @label - LED label
-**/
+ */
struct lp8860_led {
struct mutex lock;
struct i2c_client *client;
@@ -107,7 +106,7 @@ struct lp8860_led {
struct regmap *eeprom_regmap;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
- const char *label;
+ char label[LED_MAX_NAME_SIZE];
};
struct lp8860_eeprom_reg {
@@ -247,6 +246,15 @@ static int lp8860_init(struct lp8860_led *led)
unsigned int read_buf;
int ret, i, reg_count;
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 1);
@@ -282,12 +290,25 @@ static int lp8860_init(struct lp8860_led *led)
ret = regmap_write(led->regmap,
LP8860_EEPROM_CNTRL,
LP8860_PROGRAM_EEPROM);
- if (ret)
+ if (ret) {
dev_err(&led->client->dev, "Failed programming EEPROM\n");
+ goto out;
+ }
+
+ return ret;
+
out:
if (ret)
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
return ret;
}
@@ -365,19 +386,25 @@ static int lp8860_probe(struct i2c_client *client,
int ret;
struct lp8860_led *led;
struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
- led->label = LP8860_DISP_LED_NAME;
-
- if (client->dev.of_node) {
- ret = of_property_read_string(np, "label", &led->label);
- if (ret) {
- dev_err(&client->dev, "Missing label in dt\n");
- return -EINVAL;
- }
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label), "%s:%s",
+ id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::display_cluster", id->name);
}
led->enable_gpio = devm_gpiod_get_optional(&client->dev,
@@ -394,7 +421,6 @@ static int lp8860_probe(struct i2c_client *client,
led->client = client;
led->led_dev.name = led->label;
- led->led_dev.max_brightness = LED_FULL;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
mutex_init(&led->lock);
@@ -421,7 +447,7 @@ static int lp8860_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = led_classdev_register(&client->dev, &led->led_dev);
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
if (ret) {
dev_err(&client->dev, "led register err: %d\n", ret);
return ret;
@@ -435,8 +461,6 @@ static int lp8860_remove(struct i2c_client *client)
struct lp8860_led *led = i2c_get_clientdata(client);
int ret;
- led_classdev_unregister(&led->led_dev);
-
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -447,6 +471,8 @@ static int lp8860_remove(struct i2c_client *client)
"Failed to disable regulator\n");
}
+ mutex_destroy(&led->lock);
+
return 0;
}
@@ -456,18 +482,16 @@ static const struct i2c_device_id lp8860_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp8860_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp8860_leds_match[] = {
{ .compatible = "ti,lp8860", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp8860_leds_match);
-#endif
static struct i2c_driver lp8860_driver = {
.driver = {
.name = "lp8860",
- .of_match_table = of_match_ptr(of_lp8860_leds_match),
+ .of_match_table = of_lp8860_leds_match,
},
.probe = lp8860_probe,
.remove = lp8860_remove,
@@ -477,4 +501,4 @@ module_i2c_driver(lp8860_driver);
MODULE_DESCRIPTION("Texas Instruments LP8860 LED driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
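The move to devm_led_classdev_register() is what lets lp8860_remove() drop its led_classdev_unregister() call: a devm-managed registration is torn down automatically by the driver core when the device is unbound, after remove() has run. A minimal sketch of the pattern with a hypothetical my_probe():

static int my_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct led_classdev *cdev;

	cdev = devm_kzalloc(&client->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->name = "my:led";
	/* unregistered and freed automatically on unbind */
	return devm_led_classdev_register(&client->dev, cdev);
}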
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index a52674327857..8988ba3b2d65 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev)
if (!led)
return -ENOMEM;
- led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+ led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
map = dev_get_regmap(pdev->dev.parent, NULL);
if (!map) {
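The double cast is the usual fix for storing an enum-sized constant in of_device_id.data: of_device_get_match_data() returns a pointer, and converting a pointer directly to u32 truncates on 64-bit targets, which the compiler warns about. Going through unsigned long, which matches the pointer width in the kernel, makes the narrowing explicit. In condensed form:

	const void *data = of_device_get_match_data(&pdev->dev);

	u32 bad  = (u32)data;			/* warns on 64-bit: pointer
						 * cast to narrower integer */
	u32 good = (u32)(unsigned long)data;	/* explicit, warning-free */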
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 8d456dc6c5bf..df80c89ebe7f 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index bb090216b4dc..a2559b4fdfff 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -81,7 +81,7 @@ config LEDS_TRIGGER_ACTIVITY
tristate "LED activity Trigger"
depends on LEDS_TRIGGERS
help
- This allows LEDs to be controlled by a immediate CPU usage.
+ This allows LEDs to be controlled by an immediate CPU usage.
The flash frequency and duty cycle varies from faint flashes to
intense brightness depending on the instant CPU load.
If unsure, say N.
@@ -135,4 +135,11 @@ config LEDS_TRIGGER_PANIC
a different trigger.
If unsure, say Y.
+config LEDS_TRIGGER_NETDEV
+ tristate "LED Netdev Trigger"
+ depends on NET && LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 4a8b6cff7761..f3cfe1950538 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
new file mode 100644
index 000000000000..6df4781a6308
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 Ben Whitten <ben.whitten@gmail.com>
+// Copyright 2007 Oliver Jowett <oliver@opencloud.com>
+//
+// LED Kernel Netdev Trigger
+//
+// Toggles the LED to reflect the link and traffic state of a named net device
+//
+// Derived from ledtrig-timer.c which is:
+// Copyright 2005-2006 Openedhand Ltd.
+// Author: Richard Purdie <rpurdie@openedhand.com>
+
+#include <linux/atomic.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include "../leds.h"
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - network device name to monitor
+ * interval - duration of LED blink, in milliseconds
+ * link - LED's normal state reflects whether the link is up
+ * (has carrier) or not
+ * tx - LED blinks on transmitted data
+ * rx - LED blinks on received data
+ *
+ */
+
+struct led_netdev_data {
+ spinlock_t lock;
+
+ struct delayed_work work;
+ struct notifier_block notifier;
+
+ struct led_classdev *led_cdev;
+ struct net_device *net_dev;
+
+ char device_name[IFNAMSIZ];
+ atomic_t interval;
+ unsigned int last_activity;
+
+ unsigned long mode;
+#define NETDEV_LED_LINK 0
+#define NETDEV_LED_TX 1
+#define NETDEV_LED_RX 2
+#define NETDEV_LED_MODE_LINKUP 3
+};
+
+enum netdev_led_attr {
+ NETDEV_ATTR_LINK,
+ NETDEV_ATTR_TX,
+ NETDEV_ATTR_RX
+};
+
+static void set_baseline_state(struct led_netdev_data *trigger_data)
+{
+ int current_brightness;
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
+
+ current_brightness = led_cdev->brightness;
+ if (current_brightness)
+ led_cdev->blink_brightness = current_brightness;
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
+
+ if (!test_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode))
+ led_set_brightness(led_cdev, LED_OFF);
+ else {
+ if (test_bit(NETDEV_LED_LINK, &trigger_data->mode))
+ led_set_brightness(led_cdev,
+ led_cdev->blink_brightness);
+ else
+ led_set_brightness(led_cdev, LED_OFF);
+
+ /* If we are looking for RX/TX, start periodically
+ * checking stats
+ */
+ if (test_bit(NETDEV_LED_TX, &trigger_data->mode) ||
+ test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ schedule_delayed_work(&trigger_data->work, 0);
+ }
+}
+
+static ssize_t device_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ ssize_t len;
+
+ spin_lock_bh(&trigger_data->lock);
+ len = sprintf(buf, "%s\n", trigger_data->device_name);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return len;
+}
+
+static ssize_t device_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (size >= IFNAMSIZ)
+ return -EINVAL;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+
+ strncpy(trigger_data->device_name, buf, size);
+ if (size > 0 && trigger_data->device_name[size - 1] == '\n')
+ trigger_data->device_name[size - 1] = 0;
+
+ if (trigger_data->device_name[0] != 0)
+ trigger_data->net_dev =
+ dev_get_by_name(&init_net, trigger_data->device_name);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ if (trigger_data->net_dev != NULL)
+ if (netif_carrier_ok(trigger_data->net_dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+
+ trigger_data->last_activity = 0;
+
+ set_baseline_state(trigger_data);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(device_name);
+
+static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
+ enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ int bit;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%u\n", test_bit(bit, &trigger_data->mode));
+}
+
+static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
+ size_t size, enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long state;
+ int ret;
+ int bit;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (state)
+ set_bit(bit, &trigger_data->mode);
+ else
+ clear_bit(bit, &trigger_data->mode);
+
+ set_baseline_state(trigger_data);
+
+ return size;
+}
+
+static ssize_t link_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_LINK);
+}
+
+static ssize_t link_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_LINK);
+}
+
+static DEVICE_ATTR_RW(link);
+
+static ssize_t tx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_TX);
+}
+
+static ssize_t tx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_TX);
+}
+
+static DEVICE_ATTR_RW(tx);
+
+static ssize_t rx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_RX);
+}
+
+static ssize_t rx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_RX);
+}
+
+static DEVICE_ATTR_RW(rx);
+
+static ssize_t interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%u\n",
+ jiffies_to_msecs(atomic_read(&trigger_data->interval)));
+}
+
+static ssize_t interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ /* impose some basic bounds on the timer interval */
+ if (value >= 5 && value <= 10000) {
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ set_baseline_state(trigger_data); /* resets timer */
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(interval);
+
+static int netdev_trig_notify(struct notifier_block *nb,
+ unsigned long evt, void *dv)
+{
+ struct net_device *dev =
+ netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
+ struct led_netdev_data *trigger_data = container_of(nb,
+ struct
+ led_netdev_data,
+ notifier);
+
+ if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+ && evt != NETDEV_CHANGENAME)
+ return NOTIFY_DONE;
+
+ if (strcmp(dev->name, trigger_data->device_name))
+ return NOTIFY_DONE;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ switch (evt) {
+ case NETDEV_REGISTER:
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
+ break;
+ case NETDEV_CHANGENAME:
+ case NETDEV_UNREGISTER:
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ break;
+ }
+
+ set_baseline_state(trigger_data);
+
+ spin_unlock_bh(&trigger_data->lock);
+
+ return NOTIFY_DONE;
+}
+
+/* here's the real work! */
+static void netdev_trig_work(struct work_struct *work)
+{
+ struct led_netdev_data *trigger_data = container_of(work,
+ struct
+ led_netdev_data,
+ work.work);
+ struct rtnl_link_stats64 *dev_stats;
+ unsigned int new_activity;
+ struct rtnl_link_stats64 temp;
+ unsigned long interval;
+ int invert;
+
+ /* If we don't have a device, ensure we are off */
+ if (!trigger_data->net_dev) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ return;
+ }
+
+ /* If we are not looking for RX/TX, return */
+ if (!test_bit(NETDEV_LED_TX, &trigger_data->mode) &&
+ !test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ return;
+
+ dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+ new_activity =
+ (test_bit(NETDEV_LED_TX, &trigger_data->mode) ?
+ dev_stats->tx_packets : 0) +
+ (test_bit(NETDEV_LED_RX, &trigger_data->mode) ?
+ dev_stats->rx_packets : 0);
+
+ if (trigger_data->last_activity != new_activity) {
+ led_stop_software_blink(trigger_data->led_cdev);
+
+ invert = test_bit(NETDEV_LED_LINK, &trigger_data->mode);
+ interval = jiffies_to_msecs(
+ atomic_read(&trigger_data->interval));
+ /* base state is ON (link present) */
+ led_blink_set_oneshot(trigger_data->led_cdev,
+ &interval,
+ &interval,
+ invert);
+ trigger_data->last_activity = new_activity;
+ }
+
+ schedule_delayed_work(&trigger_data->work,
+ (atomic_read(&trigger_data->interval)*2));
+}
+
+static void netdev_trig_activate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data;
+ int rc;
+
+ trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
+ if (!trigger_data)
+ return;
+
+ spin_lock_init(&trigger_data->lock);
+
+ trigger_data->notifier.notifier_call = netdev_trig_notify;
+ trigger_data->notifier.priority = 10;
+
+ INIT_DELAYED_WORK(&trigger_data->work, netdev_trig_work);
+
+ trigger_data->led_cdev = led_cdev;
+ trigger_data->net_dev = NULL;
+ trigger_data->device_name[0] = 0;
+
+ trigger_data->mode = 0;
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(50));
+ trigger_data->last_activity = 0;
+
+ led_cdev->trigger_data = trigger_data;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+ if (rc)
+ goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_link);
+ if (rc)
+ goto err_out_device_name;
+ rc = device_create_file(led_cdev->dev, &dev_attr_rx);
+ if (rc)
+ goto err_out_link;
+ rc = device_create_file(led_cdev->dev, &dev_attr_tx);
+ if (rc)
+ goto err_out_rx;
+ rc = device_create_file(led_cdev->dev, &dev_attr_interval);
+ if (rc)
+ goto err_out_tx;
+ rc = register_netdevice_notifier(&trigger_data->notifier);
+ if (rc)
+ goto err_out_interval;
+ return;
+
+err_out_interval:
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+err_out_tx:
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+err_out_rx:
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+err_out_link:
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+err_out_device_name:
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+ led_cdev->trigger_data = NULL;
+ kfree(trigger_data);
+}
+
+static void netdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (trigger_data) {
+ unregister_netdevice_notifier(&trigger_data->notifier);
+
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+
+ kfree(trigger_data);
+ }
+}
+
+static struct led_trigger netdev_led_trigger = {
+ .name = "netdev",
+ .activate = netdev_trig_activate,
+ .deactivate = netdev_trig_deactivate,
+};
+
+static int __init netdev_trig_init(void)
+{
+ return led_trigger_register(&netdev_led_trigger);
+}
+
+static void __exit netdev_trig_exit(void)
+{
+ led_trigger_unregister(&netdev_led_trigger);
+}
+
+module_init(netdev_trig_init);
+module_exit(netdev_trig_exit);
+
+MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
+MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
+MODULE_DESCRIPTION("Netdev LED trigger");
+MODULE_LICENSE("GPL v2");
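From userspace the trigger is driven entirely through the sysfs attributes documented at the top of the file. A sketch of a configuration sequence, assuming a hypothetical LED registered as /sys/class/leds/status and an eth0 interface:

/* Hypothetical userspace setup: blink "status" on eth0 traffic. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
}

int main(void)
{
	sysfs_write("/sys/class/leds/status/trigger", "netdev");
	sysfs_write("/sys/class/leds/status/device_name", "eth0");
	sysfs_write("/sys/class/leds/status/link", "1");	/* on when carrier */
	sysfs_write("/sys/class/leds/status/tx", "1");		/* blink on tx */
	sysfs_write("/sys/class/leds/status/rx", "1");		/* blink on rx */
	sysfs_write("/sys/class/leds/status/interval", "100");	/* ms, 5..10000 */
	return 0;
}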
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 7acce64b692a..9d1769073562 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -1,22 +1,15 @@
-/*
- * LED Kernel Transient Trigger
- *
- * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
- *
- * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
- * ledtrig-heartbeat.c
- * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
- * Neil Brown <neilb@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-/*
- * Transient trigger allows one shot timer activation. Please refer to
- * Documentation/leds/ledtrig-transient.txt for details
-*/
+// SPDX-License-Identifier: GPL-2.0
+//
+// LED Kernel Transient Trigger
+//
+// Transient trigger allows one shot timer activation. Please refer to
+// Documentation/leds/ledtrig-transient.txt for details
+// Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+//
+// Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+// ledtrig-heartbeat.c
+// Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+// Neil Brown <neilb@suse.de>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -238,4 +231,4 @@ module_exit(transient_trig_exit);
MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
MODULE_DESCRIPTION("Transient LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 5e9e8a1fdefb..5beacab05ed7 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -176,7 +176,7 @@ static ssize_t uleds_read(struct file *file, char __user *buffer, size_t count,
return retval;
}
-static unsigned int uleds_poll(struct file *file, poll_table *wait)
+static __poll_t uleds_poll(struct file *file, poll_table *wait)
{
struct uleds_device *udev = file->private_data;
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 2a953efec4e1..10c08982185a 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,13 +27,6 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
-config NVM_RRPC
- tristate "Round-robin Hybrid Open-Channel SSD target"
- ---help---
- Allows an open-channel SSD to be exposed as a block device to the
- host. The target is implemented using a linear mapping table and
- cost-based garbage collection. It is optimized for 4K IO sizes.
-
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
---help---
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 2c3fd9d2c08c..97d9d7c71550 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_NVM) := core.o
-obj-$(CONFIG_NVM_RRPC) += rrpc.o
obj-$(CONFIG_NVM_PBLK) += pblk.o
pblk-y := pblk-init.o pblk-core.o pblk-rb.o \
pblk-write.o pblk-cache.o pblk-read.o \
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 83249b43dd06..dcc9e621e651 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,12 +45,6 @@ struct nvm_dev_map {
int nr_chnls;
};
-struct nvm_area {
- struct list_head list;
- sector_t begin;
- sector_t end; /* end is excluded */
-};
-
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -62,6 +56,30 @@ static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
return NULL;
}
+static bool nvm_target_exists(const char *name)
+{
+ struct nvm_dev *dev;
+ struct nvm_target *tgt;
+ bool ret = false;
+
+ down_write(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ list_for_each_entry(tgt, &dev->targets, list) {
+ if (!strcmp(name, tgt->disk->disk_name)) {
+ ret = true;
+ mutex_unlock(&dev->mlock);
+ goto out;
+ }
+ }
+ mutex_unlock(&dev->mlock);
+ }
+
+out:
+ up_write(&nvm_lock);
+ return ret;
+}
+
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
int i;
@@ -104,7 +122,7 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
if (clear) {
for (j = 0; j < ch_map->nr_luns; j++) {
int lun = j + lun_offs[j];
- int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+ int lunid = (ch * dev->geo.nr_luns) + lun;
WARN_ON(!test_and_clear_bit(lunid,
dev->lun_map));
@@ -122,7 +140,8 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
}
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
- int lun_begin, int lun_end)
+ u16 lun_begin, u16 lun_end,
+ u16 op)
{
struct nvm_tgt_dev *tgt_dev = NULL;
struct nvm_dev_map *dev_rmap = dev->rmap;
@@ -130,10 +149,10 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
struct ppa_addr *luns;
int nr_luns = lun_end - lun_begin + 1;
int luns_left = nr_luns;
- int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
- int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
- int bch = lun_begin / dev->geo.luns_per_chnl;
- int blun = lun_begin % dev->geo.luns_per_chnl;
+ int nr_chnls = nr_luns / dev->geo.nr_luns;
+ int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
+ int bch = lun_begin / dev->geo.nr_luns;
+ int blun = lun_begin % dev->geo.nr_luns;
int lunid = 0;
int lun_balanced = 1;
int prev_nr_luns;
@@ -154,15 +173,15 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
if (!luns)
goto err_luns;
- prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
for (i = 0; i < nr_chnls; i++) {
struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
int *lun_roffs = ch_rmap->lun_offs;
struct nvm_ch_map *ch_map = &dev_map->chnls[i];
int *lun_offs;
- int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
if (lun_balanced && prev_nr_luns != luns_in_chnl)
lun_balanced = 0;
@@ -199,8 +218,9 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
/* Target device only owns a portion of the physical device */
tgt_dev->geo.nr_chnls = nr_chnls;
- tgt_dev->geo.nr_luns = nr_luns;
- tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.all_luns = nr_luns;
+ tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.op = op;
tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
tgt_dev->q = dev->q;
tgt_dev->map = dev_map;
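The geometry renames in this series are easy to misread because the old and new names overlap: the per-channel count formerly called luns_per_chnl is now nr_luns, the device-wide total formerly called nr_luns is now all_luns, and blks_per_lun becomes nr_chks. Restated as a mapping, with the identities that appear in nvm_core_init() further down:

/* old name      -> new name                              */
/* luns_per_chnl -> nr_luns   (LUNs per channel)          */
/* nr_luns       -> all_luns  (LUNs in the whole device)  */
/* blks_per_lun  -> nr_chks   (chunks per LUN)            */
/*                                                        */
/* so: all_luns    = nr_luns * nr_chnls                   */
/*     sec_per_lun = sec_per_chk * nr_chks                */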
@@ -226,27 +246,79 @@ static const struct block_device_operations nvm_fops = {
.owner = THIS_MODULE,
};
-static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
+static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
- struct nvm_tgt_type *tmp, *tt = NULL;
+ struct nvm_tgt_type *tt;
- if (lock)
- down_write(&nvm_tgtt_lock);
+ list_for_each_entry(tt, &nvm_tgt_types, list)
+ if (!strcmp(name, tt->name))
+ return tt;
- list_for_each_entry(tmp, &nvm_tgt_types, list)
- if (!strcmp(name, tmp->name)) {
- tt = tmp;
- break;
- }
+ return NULL;
+}
+
+static struct nvm_tgt_type *nvm_find_target_type(const char *name)
+{
+ struct nvm_tgt_type *tt;
+
+ down_write(&nvm_tgtt_lock);
+ tt = __nvm_find_target_type(name);
+ up_write(&nvm_tgtt_lock);
- if (lock)
- up_write(&nvm_tgtt_lock);
return tt;
}
+static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
+ int lun_end)
+{
+ if (lun_begin > lun_end || lun_end >= geo->all_luns) {
+ pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+ lun_begin, lun_end, geo->all_luns - 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __nvm_config_simple(struct nvm_dev *dev,
+ struct nvm_ioctl_create_simple *s)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (s->lun_begin == -1 && s->lun_end == -1) {
+ s->lun_begin = 0;
+ s->lun_end = geo->all_luns - 1;
+ }
+
+ return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
+}
+
+static int __nvm_config_extended(struct nvm_dev *dev,
+ struct nvm_ioctl_create_extended *e)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
+ e->lun_begin = 0;
+ e->lun_end = dev->geo.all_luns - 1;
+ }
+
+ /* op not set falls back to the target's default */
+ if (e->op == 0xFFFF)
+ e->op = NVM_TARGET_DEFAULT_OP;
+
+ if (e->op < NVM_TARGET_MIN_OP ||
+ e->op > NVM_TARGET_MAX_OP) {
+ pr_err("nvm: invalid over provisioning value\n");
+ return -EINVAL;
+ }
+
+ return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
+}
+
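Both helpers normalize userspace sentinels before validating: an all-ones LUN range (-1 in the simple layout, 0xFFFF in the extended one) expands to the device's full span, and an op of 0xFFFF falls back to the target's default over-provisioning before the min/max bounds are enforced. Worked through for a hypothetical device with geo->all_luns == 128:

	struct nvm_ioctl_create_extended e = {
		.lun_begin = 0xFFFF,	/* sentinel: whole device */
		.lun_end   = 0xFFFF,
		.op        = 0xFFFF,	/* sentinel: target default */
	};

	__nvm_config_extended(dev, &e);
	/* now: e.lun_begin == 0, e.lun_end == 127,
	 * e.op == NVM_TARGET_DEFAULT_OP */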
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
- struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct nvm_ioctl_create_extended e;
struct request_queue *tqueue;
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
@@ -255,22 +327,41 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
void *targetdata;
int ret;
- tt = nvm_find_target_type(create->tgttype, 1);
+ switch (create->conf.type) {
+ case NVM_CONFIG_TYPE_SIMPLE:
+ ret = __nvm_config_simple(dev, &create->conf.s);
+ if (ret)
+ return ret;
+
+ e.lun_begin = create->conf.s.lun_begin;
+ e.lun_end = create->conf.s.lun_end;
+ e.op = NVM_TARGET_DEFAULT_OP;
+ break;
+ case NVM_CONFIG_TYPE_EXTENDED:
+ ret = __nvm_config_extended(dev, &create->conf.e);
+ if (ret)
+ return ret;
+
+ e = create->conf.e;
+ break;
+ default:
+ pr_err("nvm: config type not valid\n");
+ return -EINVAL;
+ }
+
+ tt = nvm_find_target_type(create->tgttype);
if (!tt) {
pr_err("nvm: target type %s not found\n", create->tgttype);
return -EINVAL;
}
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- mutex_unlock(&dev->mlock);
+ if (nvm_target_exists(create->tgtname)) {
+ pr_err("nvm: target name already exists (%s)\n",
+ create->tgtname);
return -EINVAL;
}
- mutex_unlock(&dev->mlock);
- ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
+ ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
if (ret)
return ret;
@@ -280,7 +371,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_reserve;
}
- tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
if (!tgt_dev) {
pr_err("nvm: could not create target device\n");
ret = -ENOMEM;
@@ -350,7 +441,7 @@ err_dev:
err_t:
kfree(t);
err_reserve:
- nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+ nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
return ret;
}
@@ -420,7 +511,7 @@ static int nvm_register_map(struct nvm_dev *dev)
for (i = 0; i < dev->geo.nr_chnls; i++) {
struct nvm_ch_map *ch_rmap;
int *lun_roffs;
- int luns_in_chnl = dev->geo.luns_per_chnl;
+ int luns_in_chnl = dev->geo.nr_luns;
ch_rmap = &rmap->chnls[i];
@@ -524,41 +615,12 @@ static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
-void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
- int len)
-{
- struct nvm_geo *geo = &dev->geo;
- struct nvm_dev_map *dev_rmap = dev->rmap;
- u64 i;
-
- for (i = 0; i < len; i++) {
- struct nvm_ch_map *ch_rmap;
- int *lun_roffs;
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- u64 diff;
-
- if (!pba)
- continue;
-
- gaddr = linear_to_generic_addr(geo, pba);
- ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
- lun_roffs = ch_rmap->lun_offs;
-
- diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
- (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
-
- entries[i] -= cpu_to_le64(diff);
- }
-}
-EXPORT_SYMBOL(nvm_part_to_tgt);
-
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
down_write(&nvm_tgtt_lock);
- if (nvm_find_target_type(tt->name, 0))
+ if (__nvm_find_target_type(tt->name))
ret = -EEXIST;
else
list_add(&tt->list, &nvm_tgt_types);
@@ -726,112 +788,6 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io_sync);
-int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
- int nr_ppas)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct nvm_rq rqd;
- int ret;
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- rqd.opcode = NVM_OP_ERASE;
- rqd.flags = geo->plane_mode >> 1;
-
- ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
- if (ret)
- return ret;
-
- ret = nvm_submit_io_sync(tgt_dev, &rqd);
- if (ret) {
- pr_err("rrpr: erase I/O submission failed: %d\n", ret);
- goto free_ppa_list;
- }
-
-free_ppa_list:
- nvm_free_rqd_ppalist(tgt_dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_erase_sync);
-
-int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvm_dev *dev = tgt_dev->parent;
-
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
- return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
-}
-EXPORT_SYMBOL(nvm_get_l2p_tbl);
-
-int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_geo *geo = &dev->geo;
- struct nvm_area *area, *prev, *next;
- sector_t begin = 0;
- sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
-
- if (len > max_sectors)
- return -EINVAL;
-
- area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
- if (!area)
- return -ENOMEM;
-
- prev = NULL;
-
- spin_lock(&dev->lock);
- list_for_each_entry(next, &dev->area_list, list) {
- if (begin + len > next->begin) {
- begin = next->end;
- prev = next;
- continue;
- }
- break;
- }
-
- if ((begin + len) > max_sectors) {
- spin_unlock(&dev->lock);
- kfree(area);
- return -EINVAL;
- }
-
- area->begin = *lba = begin;
- area->end = begin + len;
-
- if (prev) /* insert into sorted order */
- list_add(&area->list, &prev->list);
- else
- list_add(&area->list, &dev->area_list);
- spin_unlock(&dev->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_get_area);
-
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_area *area;
-
- spin_lock(&dev->lock);
- list_for_each_entry(area, &dev->area_list, list) {
- if (area->begin != begin)
- continue;
-
- list_del(&area->list);
- spin_unlock(&dev->lock);
- kfree(area);
- return;
- }
- spin_unlock(&dev->lock);
-}
-EXPORT_SYMBOL(nvm_put_area);
-
void nvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_dev *tgt_dev = rqd->dev;
@@ -858,10 +814,10 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != geo->blks_per_lun * geo->plane_mode)
+ if (nr_blks != geo->nr_chks * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ for (blk = 0; blk < geo->nr_chks; blk++) {
offset = blk * geo->plane_mode;
blktype = blks[offset];
@@ -877,7 +833,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return geo->blks_per_lun;
+ return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -892,53 +848,6 @@ int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
-static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- struct nvm_geo *geo = &dev->geo;
- int i;
-
- dev->lps_per_blk = geo->pgs_per_blk;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* Just a linear array */
- for (i = 0; i < dev->lps_per_blk; i++)
- dev->lptbl[i] = i;
-
- return 0;
-}
-
-static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- int i, p;
- struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
-
- if (!mlc->num_pairs)
- return 0;
-
- dev->lps_per_blk = mlc->num_pairs;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* The lower page table encoding consists of a list of bytes, where each
- * has a lower and an upper half. The first half byte maintains the
- * increment value and every value after is an offset added to the
- * previous incrementation value
- */
- dev->lptbl[0] = mlc->pairs[0] & 0xF;
- for (i = 1; i < dev->lps_per_blk; i++) {
- p = mlc->pairs[i >> 1];
- if (i & 0x1) /* upper */
- dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
- else /* lower */
- dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
- }
-
- return 0;
-}
-
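/*
 * Illustrative sketch, not part of the patch: decoding the nibble
 * encoding that the removed nvm_init_mlc_tbl() above parsed. Each
 * byte of 'pairs' carries two 4-bit deltas, low nibble first, and
 * every table entry is the previous entry plus the next delta. The
 * pairs[] bytes here are invented.
 */
#include <stdio.h>

int main(void)
{
	unsigned char pairs[] = { 0x10, 0x22, 0x21 };	/* made-up data */
	int num_pairs = 6;			/* entries, two per byte */
	int lptbl[6];

	lptbl[0] = pairs[0] & 0xF;
	for (int i = 1; i < num_pairs; i++) {
		int p = pairs[i >> 1];

		if (i & 0x1)	/* upper nibble */
			lptbl[i] = lptbl[i - 1] + ((p & 0xF0) >> 4);
		else		/* lower nibble */
			lptbl[i] = lptbl[i - 1] + (p & 0xF);
	}

	for (int i = 0; i < num_pairs; i++)
		printf("lptbl[%d] = %d\n", i, lptbl[i]);
	return 0;
}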
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
@@ -946,66 +855,44 @@ static int nvm_core_init(struct nvm_dev *dev)
struct nvm_geo *geo = &dev->geo;
int ret;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* Whole device values */
geo->nr_chnls = grp->num_ch;
- geo->luns_per_chnl = grp->num_lun;
-
- /* Generic device values */
- geo->pgs_per_blk = grp->num_pg;
- geo->blks_per_lun = grp->num_blk;
- geo->nr_planes = grp->num_pln;
- geo->fpg_size = grp->fpg_sz;
- geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->nr_luns = grp->num_lun;
+
+ /* Generic device geometry values */
+ geo->ws_min = grp->ws_min;
+ geo->ws_opt = grp->ws_opt;
+ geo->ws_seq = grp->ws_seq;
+ geo->ws_per_chk = grp->ws_per_chk;
+ geo->nr_chks = grp->num_chk;
geo->sec_size = grp->csecs;
geo->oob_size = grp->sos;
- geo->sec_per_pg = grp->fpg_sz / grp->csecs;
geo->mccap = grp->mccap;
- memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- geo->plane_mode = NVM_PLANE_SINGLE;
geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
- if (grp->mpos & 0x020202)
- geo->plane_mode = NVM_PLANE_DOUBLE;
- if (grp->mpos & 0x040404)
- geo->plane_mode = NVM_PLANE_QUAD;
+ geo->sec_per_chk = grp->clba;
+ geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
+ geo->all_luns = geo->nr_luns * geo->nr_chnls;
- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- /* calculated values */
+ /* 1.2 spec device geometry values */
+ geo->plane_mode = 1 << geo->ws_seq;
+ geo->nr_planes = geo->ws_opt / geo->ws_min;
+ geo->sec_per_pg = geo->ws_min;
geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
- geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
- geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
- geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = geo->nr_luns * geo->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
+ dev->total_secs = geo->all_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- ret = -EINVAL;
- goto err_fmtype;
- }
-
INIT_LIST_HEAD(&dev->area_list);
INIT_LIST_HEAD(&dev->targets);
mutex_init(&dev->mlock);
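/*
 * Illustrative sketch, not part of the patch: how the reworked
 * nvm_core_init() above derives the 1.2-spec plane/page values from
 * the new write-size geometry fields. All sample numbers are
 * invented; a real device reports them in its identity data.
 */
#include <stdio.h>

int main(void)
{
	int ws_min = 4, ws_opt = 16, ws_seq = 1;	/* assumed identity */
	int nr_chks = 1020, clba = 4096;		/* sectors per chunk */
	int nr_luns = 4, nr_chnls = 8;

	int plane_mode = 1 << ws_seq;		/* 2, i.e. double plane */
	int nr_planes = ws_opt / ws_min;	/* 4 */
	int sec_per_pg = ws_min;		/* 4 */
	int sec_per_pl = sec_per_pg * nr_planes;
	long sec_per_lun = (long)clba * nr_chks;
	int all_luns = nr_luns * nr_chnls;

	printf("plane_mode=%d planes=%d sec/pl=%d sec/lun=%ld luns=%d\n",
	       plane_mode, nr_planes, sec_per_pl, sec_per_lun, all_luns);
	return 0;
}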
@@ -1031,7 +918,6 @@ static void nvm_free(struct nvm_dev *dev)
dev->ops->destroy_dma_pool(dev->dma_pool);
nvm_unregister_map(dev);
- kfree(dev->lptbl);
kfree(dev->lun_map);
kfree(dev);
}
@@ -1062,8 +948,8 @@ static int nvm_init(struct nvm_dev *dev)
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
dev->name, geo->sec_per_pg, geo->nr_planes,
- geo->pgs_per_blk, geo->blks_per_lun,
- geo->nr_luns, geo->nr_chnls);
+ geo->ws_per_chk, geo->nr_chks,
+ geo->all_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
@@ -1135,7 +1021,6 @@ EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
- struct nvm_ioctl_create_simple *s;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1146,23 +1031,6 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
- pr_err("nvm: config type not valid\n");
- return -EINVAL;
- }
- s = &create->conf.s;
-
- if (s->lun_begin == -1 && s->lun_end == -1) {
- s->lun_begin = 0;
- s->lun_end = dev->geo.nr_luns - 1;
- }
-
- if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
- pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
- return -EINVAL;
- }
-
return nvm_create_tgt(dev, create);
}
@@ -1262,6 +1130,12 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
return -EFAULT;
+ if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
+ create.conf.e.rsv != 0) {
+ pr_err("nvm: reserved config field in use\n");
+ return -EINVAL;
+ }
+
create.dev[DISK_NAME_LEN - 1] = '\0';
create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
create.tgtname[DISK_NAME_LEN - 1] = '\0';
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 0d227ef7d1b9..000fcad38136 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -19,12 +19,16 @@
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
+ struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
sector_t lba = pblk_get_lba(bio);
+ unsigned long start_time = jiffies;
unsigned int bpos, pos;
int nr_entries = pblk_get_secs(bio);
int i, ret;
+ generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
* rollback from here on.
@@ -67,6 +71,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
+ generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
return ret;
}
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 76516ee84e9a..0487b9340c1d 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -32,8 +32,8 @@ static void pblk_line_mark_bb(struct work_struct *work)
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
- pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
+ line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ pos = pblk_ppa_to_pos(&dev->geo, *ppa);
pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
line->id, pos);
@@ -48,7 +48,7 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_dev_ppa_to_pos(geo, *ppa);
+ int pos = pblk_ppa_to_pos(geo, *ppa);
pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
atomic_long_inc(&pblk->erase_failed);
@@ -66,7 +66,7 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
+ line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
atomic_dec(&line->left_seblks);
if (rqd->error) {
@@ -144,7 +144,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
BUG_ON(pblk_ppa_empty(ppa));
#endif
- line_id = pblk_tgt_ppa_to_line(ppa);
+ line_id = pblk_ppa_to_line(ppa);
line = &pblk->lines[line_id];
paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
@@ -650,7 +650,7 @@ next_rq:
} else {
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_dev_ppa_to_pos(geo, ppa);
+ int pos = pblk_ppa_to_pos(geo, ppa);
int read_type = PBLK_READ_RANDOM;
if (pblk_io_aligned(pblk, rq_ppas))
@@ -668,7 +668,7 @@ next_rq:
}
ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
@@ -742,7 +742,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
cmd_op = NVM_OP_PWRITE;
flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ) {
+ } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -802,7 +802,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
if (rqd.error) {
if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
- else
+ else if (dir == PBLK_READ)
pblk_log_read_err(pblk, &rqd);
}
@@ -816,7 +816,7 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
u64 bpaddr = pblk_line_smeta_start(pblk, line);
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
+ return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
@@ -854,8 +854,8 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not sync erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
rqd.error = ret;
goto out;
@@ -979,7 +979,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
/* Start metadata */
smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
- smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
+ smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
/* Fill metadata among lines */
if (cur) {
@@ -1032,7 +1032,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
lm->sec_per_line);
bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
lm->sec_per_line);
- line->sec_in_line -= geo->sec_per_blk;
+ line->sec_in_line -= geo->sec_per_chk;
if (bit >= lm->emeta_bb)
nr_bb++;
}
@@ -1145,7 +1145,7 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
}
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
if (!pblk_line_init_bb(pblk, line, 0)) {
list_add(&line->list, &l_mg->free_list);
@@ -1233,7 +1233,7 @@ retry:
l_mg->data_line = retry_line;
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, retry_line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, false);
if (pblk_line_erase(pblk, retry_line))
goto retry;
@@ -1252,7 +1252,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line;
- int is_next = 0;
spin_lock(&l_mg->free_lock);
line = pblk_line_get(pblk);
@@ -1280,7 +1279,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
@@ -1290,10 +1288,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
return NULL;
}
- pblk_rl_free_lines_dec(&pblk->rl, line);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
retry_setup:
if (!pblk_line_init_metadata(pblk, line, NULL)) {
line = pblk_line_retry(pblk, line);
@@ -1311,6 +1305,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
+
return line;
}
@@ -1395,7 +1391,6 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
- int is_next = 0;
cur = l_mg->data_line;
new = l_mg->data_next;
@@ -1444,6 +1439,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, new, true);
+
/* Allocate next line for preparation */
spin_lock(&l_mg->free_lock);
l_mg->data_next = pblk_line_get(pblk);
@@ -1457,13 +1454,9 @@ retry_setup:
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
out:
return new;
}
@@ -1561,8 +1554,8 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not async erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
}
return err;
@@ -1746,7 +1739,7 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_lun *rlun;
- int nr_luns = geo->nr_luns;
+ int nr_luns = geo->all_luns;
int bit = -1;
while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
@@ -1884,7 +1877,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
/* If the L2P entry maps to a line, the reference is valid */
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
- int line_id = pblk_dev_ppa_to_line(ppa);
+ int line_id = pblk_ppa_to_line(ppa);
struct pblk_line *line = &pblk->lines[line_id];
kref_get(&line->ref);
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 9c8e114c8a54..3d899383666e 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -169,7 +169,14 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
* the line untouched. TODO: Implement a recovery routine that scans and
* moves all sectors on the line.
*/
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+
+ ret = pblk_recov_check_emeta(pblk, emeta_buf);
+ if (ret) {
+ pr_err("pblk: inconsistent emeta (line %d)\n", line->id);
+ goto fail_free_emeta;
+ }
+
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list) {
pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
goto fail_free_emeta;
@@ -519,22 +526,12 @@ void pblk_gc_should_start(struct pblk *pblk)
}
}
-/*
- * If flush_wq == 1 then no lock should be held by the caller since
- * flush_workqueue can sleep
- */
-static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
-{
- pblk->gc.gc_active = 0;
- pr_debug("pblk: gc stop\n");
-}
-
void pblk_gc_should_stop(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
if (gc->gc_active && !gc->gc_forced)
- pblk_gc_stop(pblk, 0);
+ gc->gc_active = 0;
}
void pblk_gc_should_kick(struct pblk *pblk)
@@ -660,7 +657,7 @@ void pblk_gc_exit(struct pblk *pblk)
gc->gc_enabled = 0;
del_timer_sync(&gc->gc_timer);
- pblk_gc_stop(pblk, 1);
+ gc->gc_active = 0;
if (gc->gc_ts)
kthread_stop(gc->gc_ts);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 695826a06b5d..93d671ca518e 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -169,8 +169,8 @@ static int pblk_set_ppaf(struct pblk *pblk)
}
ppaf.ch_len = power_len;
- power_len = get_count_order(geo->luns_per_chnl);
- if (1 << power_len != geo->luns_per_chnl) {
+ power_len = get_count_order(geo->nr_luns);
+ if (1 << power_len != geo->nr_luns) {
pr_err("pblk: supports only power-of-two LUN config.\n");
return -EINVAL;
}
@@ -254,7 +254,7 @@ static int pblk_core_init(struct pblk *pblk)
struct nvm_geo *geo = &dev->geo;
pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
- geo->nr_planes * geo->nr_luns;
+ geo->nr_planes * geo->all_luns;
if (pblk_init_global_caches(pblk))
return -ENOMEM;
@@ -270,21 +270,22 @@ static int pblk_core_init(struct pblk *pblk)
if (!pblk->gen_ws_pool)
goto free_page_bio_pool;
- pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
+ pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
+ pblk_rec_cache);
if (!pblk->rec_pool)
goto free_gen_ws_pool;
- pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->e_rq_pool)
goto free_r_rq_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
goto free_e_rq_pool;
@@ -354,6 +355,8 @@ static void pblk_core_free(struct pblk *pblk)
mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
+ pblk_rwb_free(pblk);
+
pblk_free_global_caches(pblk);
}
@@ -409,7 +412,7 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
u8 *blks;
int nr_blks, ret;
- nr_blks = geo->blks_per_lun * geo->plane_mode;
+ nr_blks = geo->nr_chks * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
@@ -482,20 +485,21 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
int i, ret;
/* TODO: Implement unbalanced LUN support */
- if (geo->luns_per_chnl < 0) {
+ if (geo->nr_luns < 0) {
pr_err("pblk: unbalanced LUN config.\n");
return -EINVAL;
}
- pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
+ pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
+ GFP_KERNEL);
if (!pblk->luns)
return -ENOMEM;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
/* Stripe across channels */
int ch = i % geo->nr_chnls;
int lun_raw = i / geo->nr_chnls;
- int lunid = lun_raw + ch * geo->luns_per_chnl;
+ int lunid = lun_raw + ch * geo->nr_luns;
rlun = &pblk->luns[i];
rlun->bppa = luns[lunid];
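/*
 * Illustrative sketch, not part of the patch: the channel-striping
 * index math in pblk_luns_init() above. Consecutive pblk LUN slots
 * alternate across channels, and lunid recovers the device's
 * channel-major numbering. Geometry numbers are invented.
 */
#include <stdio.h>

int main(void)
{
	int nr_chnls = 4, nr_luns = 2;		/* LUNs per channel */
	int all_luns = nr_chnls * nr_luns;

	for (int i = 0; i < all_luns; i++) {
		int ch = i % nr_chnls;
		int lun_raw = i / nr_chnls;
		int lunid = lun_raw + ch * nr_luns;

		printf("slot %d -> ch %d lun %d (device index %d)\n",
		       i, ch, lun_raw, lunid);
	}
	return 0;
}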
@@ -577,22 +581,37 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
sector_t provisioned;
+ int sec_meta, blk_meta;
- pblk->over_pct = 20;
+ if (geo->op == NVM_TARGET_DEFAULT_OP)
+ pblk->op = PBLK_DEFAULT_OP;
+ else
+ pblk->op = geo->op;
provisioned = nr_free_blks;
- provisioned *= (100 - pblk->over_pct);
+ provisioned *= (100 - pblk->op);
sector_div(provisioned, 100);
+ pblk->op_blks = nr_free_blks - provisioned;
+
/* Internally pblk manages all free blocks, but all calculations based
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_blks;
- pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
- pblk->capacity = provisioned * geo->sec_per_blk;
+ pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
+
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
+
+ pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;
+
atomic_set(&pblk->rl.free_blocks, nr_free_blks);
+ atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
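/*
 * Illustrative sketch, not part of the patch: the provisioning math
 * in pblk_set_provision() above, with invented numbers. With an 11%
 * over-provision (PBLK_DEFAULT_OP), the exported capacity excludes
 * both the OP blocks and the blocks eaten by per-line metadata.
 */
#include <stdio.h>

int main(void)
{
	long nr_free_blks = 8000;
	int sec_per_chk = 4096;
	int op = 11;					/* percent */
	int smeta_sec = 8, emeta_sec0 = 64, nr_free_lines = 125;

	long provisioned = nr_free_blks * (100 - op) / 100;
	long op_blks = nr_free_blks - provisioned;

	/* sectors, then whole chunks, consumed by line metadata */
	long sec_meta = (long)(smeta_sec + emeta_sec0) * nr_free_lines;
	long blk_meta = (sec_meta + sec_per_chk - 1) / sec_per_chk;

	long capacity = (provisioned - blk_meta) * sec_per_chk;

	printf("provisioned=%ld op_blks=%ld blk_meta=%ld capacity=%ld secs\n",
	       provisioned, op_blks, blk_meta, capacity);
	return 0;
}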
static int pblk_lines_alloc_metadata(struct pblk *pblk)
@@ -683,7 +702,7 @@ static int pblk_lines_init(struct pblk *pblk)
int i, ret;
pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
- max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
+ max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
max_write_ppas : nvm_max_phys_sects(dev);
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
@@ -693,26 +712,26 @@ static int pblk_lines_init(struct pblk *pblk)
return -EINVAL;
}
- div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
+ div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
if (mod) {
pr_err("pblk: bad configuration of sectors/pages\n");
return -EINVAL;
}
- l_mg->nr_lines = geo->blks_per_lun;
+ l_mg->nr_lines = geo->nr_chks;
l_mg->log_line = l_mg->data_line = NULL;
l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
l_mg->nr_free_lines = 0;
bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
- lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
- lm->blk_per_line = geo->nr_luns;
- lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
+ lm->blk_per_line = geo->all_luns;
+ lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
- lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->mid_thrs = lm->sec_per_line / 2;
lm->high_thrs = lm->sec_per_line / 4;
- lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
+ lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
/* Calculate necessary pages for smeta. See comment over struct
* line_smeta definition
@@ -742,12 +761,12 @@ add_emeta_page:
goto add_emeta_page;
}
- lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+ lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
lm->min_blk_line = 1;
- if (geo->nr_luns > 1)
+ if (geo->all_luns > 1)
lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
- lm->emeta_sec[0], geo->sec_per_blk);
+ lm->emeta_sec[0], geo->sec_per_chk);
if (lm->min_blk_line > lm->blk_per_line) {
pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
@@ -772,7 +791,7 @@ add_emeta_page:
goto fail_free_bb_template;
}
- bb_distance = (geo->nr_luns) * geo->sec_per_pl;
+ bb_distance = (geo->all_luns) * geo->sec_per_pl;
for (i = 0; i < lm->sec_per_line; i += bb_distance)
bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);
@@ -844,7 +863,7 @@ add_emeta_page:
pblk_set_provision(pblk, nr_free_blks);
/* Cleanup per-LUN bad block lists - managed within lines on run-time */
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return 0;
@@ -858,7 +877,7 @@ fail_free_bb_template:
fail_free_meta:
pblk_line_meta_free(pblk);
fail:
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return ret;
@@ -866,15 +885,19 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
- mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
-
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
if (IS_ERR(pblk->writer_ts)) {
- pr_err("pblk: could not allocate writer kthread\n");
- return PTR_ERR(pblk->writer_ts);
+ int err = PTR_ERR(pblk->writer_ts);
+
+ if (err != -EINTR)
+ pr_err("pblk: could not allocate writer kthread (%d)\n",
+ err);
+ return err;
}
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
+ mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
+
return 0;
}
@@ -910,7 +933,6 @@ static void pblk_tear_down(struct pblk *pblk)
pblk_pipeline_stop(pblk);
pblk_writer_stop(pblk);
pblk_rb_sync_l2p(&pblk->rwb);
- pblk_rwb_free(pblk);
pblk_rl_free(&pblk->rl);
pr_debug("pblk: consistent tear down\n");
@@ -1025,7 +1047,8 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
ret = pblk_writer_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize write thread\n");
+ if (ret != -EINTR)
+ pr_err("pblk: could not initialize write thread\n");
goto fail_free_lines;
}
@@ -1041,13 +1064,14 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
blk_queue_write_cache(tqueue, true, false);
- tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
+ tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
tqueue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
- pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
- geo->nr_luns, pblk->l_mg.nr_lines,
+ pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
+ tdisk->disk_name,
+ geo->all_luns, pblk->l_mg.nr_lines,
(unsigned long long)pblk->rl.nr_secs,
pblk->rwb.nr_entries);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 6f3ecde2140f..7445e6430c52 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -146,7 +146,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
return;
/* Erase blocks that are bad in this line but might not be in next */
- if (unlikely(ppa_empty(*erase_ppa)) &&
+ if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
int bit = -1;
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index b8f78e401482..ec8fc314646b 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -54,7 +54,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
- rb->sync_point = EMPTY_ENTRY;
+ rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
spin_lock_init(&rb->s_lock);
@@ -112,7 +112,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
up_write(&pblk_rb_lock);
#ifdef CONFIG_NVM_DEBUG
- atomic_set(&rb->inflight_sync_point, 0);
+ atomic_set(&rb->inflight_flush_point, 0);
#endif
/*
@@ -226,7 +226,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
entry->cacheline);
- line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
@@ -349,35 +349,35 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
smp_store_release(&entry->w_ctx.flags, flags);
}
-static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
+static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
unsigned int pos)
{
struct pblk_rb_entry *entry;
- unsigned int subm, sync_point;
+ unsigned int sync, flush_point;
- subm = READ_ONCE(rb->subm);
+ sync = READ_ONCE(rb->sync);
+
+ if (pos == sync)
+ return 0;
#ifdef CONFIG_NVM_DEBUG
- atomic_inc(&rb->inflight_sync_point);
+ atomic_inc(&rb->inflight_flush_point);
#endif
- if (pos == subm)
- return 0;
+ flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
+ entry = &rb->entries[flush_point];
- sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
- entry = &rb->entries[sync_point];
+ pblk_rb_sync_init(rb, NULL);
- /* Protect syncs */
- smp_store_release(&rb->sync_point, sync_point);
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, flush_point);
- if (!bio)
- return 0;
+ if (bio)
+ bio_list_add(&entry->w_ctx.bios, bio);
- spin_lock_irq(&rb->s_lock);
- bio_list_add(&entry->w_ctx.bios, bio);
- spin_unlock_irq(&rb->s_lock);
+ pblk_rb_sync_end(rb, NULL);
- return 1;
+ return bio ? 1 : 0;
}
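/*
 * Illustrative sketch, not part of the patch: where
 * pblk_rb_flush_point_set() above places the flush point. It is the
 * slot just before the current write position, wrapping around the
 * power-of-two ring.
 */
#include <stdio.h>

static unsigned int flush_point_for(unsigned int pos, unsigned int nr_entries)
{
	return (pos == 0) ? (nr_entries - 1) : (pos - 1);
}

int main(void)
{
	unsigned int nr_entries = 8;	/* ring size, a power of two */

	for (unsigned int pos = 0; pos < 3; pos++)
		printf("mem=%u -> flush_point=%u\n",
		       pos, flush_point_for(pos, nr_entries));
	return 0;
}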
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
@@ -416,7 +416,7 @@ void pblk_rb_flush(struct pblk_rb *rb)
struct pblk *pblk = container_of(rb, struct pblk, rwb);
unsigned int mem = READ_ONCE(rb->mem);
- if (pblk_rb_sync_point_set(rb, NULL, mem))
+ if (pblk_rb_flush_point_set(rb, NULL, mem))
return;
pblk_write_should_kick(pblk);
@@ -440,7 +440,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->nr_flush);
#endif
- if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
+ if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
*io_ret = NVM_IO_OK;
}
@@ -606,21 +606,6 @@ try:
return NVM_IO_ERR;
}
- if (flags & PBLK_FLUSH_ENTRY) {
- unsigned int sync_point;
-
- sync_point = READ_ONCE(rb->sync_point);
- if (sync_point == pos) {
- /* Protect syncs */
- smp_store_release(&rb->sync_point, EMPTY_ENTRY);
- }
-
- flags &= ~PBLK_FLUSH_ENTRY;
-#ifdef CONFIG_NVM_DEBUG
- atomic_dec(&rb->inflight_sync_point);
-#endif
- }
-
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
@@ -730,15 +715,24 @@ void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
- unsigned int sync;
- unsigned int i;
-
+ unsigned int sync, flush_point;
lockdep_assert_held(&rb->s_lock);
sync = READ_ONCE(rb->sync);
+ flush_point = READ_ONCE(rb->flush_point);
- for (i = 0; i < nr_entries; i++)
- sync = (sync + 1) & (rb->nr_entries - 1);
+ if (flush_point != EMPTY_ENTRY) {
+ unsigned int secs_to_flush;
+
+ secs_to_flush = pblk_rb_ring_count(flush_point, sync,
+ rb->nr_entries);
+ if (secs_to_flush < nr_entries) {
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, EMPTY_ENTRY);
+ }
+ }
+
+ sync = (sync + nr_entries) & (rb->nr_entries - 1);
/* Protect from counts */
smp_store_release(&rb->sync, sync);
@@ -746,22 +740,27 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
return sync;
}
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
+/* Calculate how many sectors to submit up to the current flush point. */
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
- unsigned int subm, sync_point;
- unsigned int count;
+ unsigned int subm, sync, flush_point;
+ unsigned int submitted, to_flush;
- /* Protect syncs */
- sync_point = smp_load_acquire(&rb->sync_point);
- if (sync_point == EMPTY_ENTRY)
+ /* Protect flush points */
+ flush_point = smp_load_acquire(&rb->flush_point);
+ if (flush_point == EMPTY_ENTRY)
return 0;
+ /* Protect syncs */
+ sync = smp_load_acquire(&rb->sync);
+
subm = READ_ONCE(rb->subm);
+ submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
/* The sync point itself counts as a sector to sync */
- count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;
+ to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
- return count;
+ return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
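/*
 * Illustrative sketch, not part of the patch: the ring arithmetic
 * behind pblk_rb_flush_point_count() above. ring_count() stands in
 * for pblk_rb_ring_count() and assumes the usual power-of-two ring
 * distance; the positions are invented.
 */
#include <stdio.h>

static unsigned int ring_count(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return (head - tail) & (size - 1);
}

int main(void)
{
	unsigned int size = 16;
	unsigned int sync = 4, subm = 6, flush_point = 9;

	unsigned int submitted = ring_count(subm, sync, size);
	/* the flush point itself still has to be persisted, hence +1 */
	unsigned int to_flush = ring_count(flush_point, sync, size) + 1;

	printf("submitted=%u to_flush=%u -> submit %u more\n",
	       submitted, to_flush,
	       (submitted < to_flush) ? to_flush - submitted : 0);
	return 0;
}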
/*
@@ -801,7 +800,7 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
(rb->sync == rb->l2p_update) &&
- (rb->sync_point == EMPTY_ENTRY)) {
+ (rb->flush_point == EMPTY_ENTRY)) {
goto out;
}
@@ -848,7 +847,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
queued_entries++;
spin_unlock_irq(&rb->s_lock);
- if (rb->sync_point != EMPTY_ENTRY)
+ if (rb->flush_point != EMPTY_ENTRY)
offset = scnprintf(buf, PAGE_SIZE,
"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
rb->nr_entries,
@@ -857,14 +856,14 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
- rb->sync_point,
+ rb->flush_point,
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
else
offset = scnprintf(buf, PAGE_SIZE,
@@ -875,13 +874,13 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
return offset;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index ca79d8fb3e60..2f761283f43e 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -141,7 +141,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
struct ppa_addr ppa = ppa_list[i];
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
kref_put(&line->ref, pblk_line_put_wq);
}
}
@@ -158,8 +158,12 @@ static void pblk_end_user_read(struct bio *bio)
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bool put_line)
{
+ struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
+ unsigned long start_time = r_ctx->start_time;
+
+ generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -193,9 +197,9 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
struct pblk_sec_meta *meta_list = rqd->meta_list;
@@ -270,7 +274,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
- int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+ int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
struct pblk_line *line = &pblk->lines[line_id];
kref_put(&line->ref, pblk_line_put);
@@ -306,6 +310,8 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
return NVM_IO_OK;
err:
+ pr_err("pblk: failed to perform partial read\n");
+
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
__pblk_end_io_read(pblk, rqd, false);
@@ -357,6 +363,7 @@ retry:
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
struct pblk_g_ctx *r_ctx;
@@ -372,6 +379,8 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_ERR;
}
+ generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+
bitmap_zero(&read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -383,6 +392,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
+ r_ctx->start_time = jiffies;
r_ctx->lba = blba;
/* Save the index for this bio's start. This is needed in case
@@ -422,7 +432,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
if (!int_bio) {
pr_err("pblk: could not clone read bio\n");
- return NVM_IO_ERR;
+ goto fail_end_io;
}
rqd->bio = int_bio;
@@ -433,7 +443,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
pr_err("pblk: read IO submission failed\n");
if (int_bio)
bio_put(int_bio);
- return ret;
+ goto fail_end_io;
}
return NVM_IO_OK;
@@ -442,17 +452,14 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
- if (ret) {
- pr_err("pblk: failed to perform partial read\n");
- return ret;
- }
-
- return NVM_IO_OK;
+ return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
+fail_end_io:
+ __pblk_end_io_read(pblk, rqd, false);
+ return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index eadb3eb5d4dc..1d5e961bf5e0 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -111,18 +111,18 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
return 0;
}
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
u32 crc;
crc = pblk_calc_emeta_crc(pblk, emeta_buf);
if (le32_to_cpu(emeta_buf->crc) != crc)
- return NULL;
+ return 1;
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
- return NULL;
+ return 1;
- return emeta_to_lbas(pblk, emeta_buf);
+ return 0;
}
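/*
 * Illustrative sketch, not part of the patch: the two consistency
 * checks pblk_recov_check_emeta() above performs, modeled in
 * userspace. demo_csum() is a toy stand-in for pblk_calc_emeta_crc()
 * and the magic value is assumed for the demo.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_MAGIC 0x70626c6bU		/* "pblk", assumed */

struct demo_emeta {
	uint32_t identifier;
	uint32_t crc;
	uint8_t payload[16];
};

static uint32_t demo_csum(const struct demo_emeta *e)
{
	uint32_t sum = e->identifier;

	for (int i = 0; i < 16; i++)
		sum = (sum << 1) ^ e->payload[i];
	return sum;
}

static int demo_check_emeta(const struct demo_emeta *e)
{
	if (e->crc != demo_csum(e))
		return 1;	/* corrupt: caller falls back to OOB scan */
	if (e->identifier != DEMO_MAGIC)
		return 1;
	return 0;
}

int main(void)
{
	struct demo_emeta e = { .identifier = DEMO_MAGIC };

	memset(e.payload, 0xab, sizeof(e.payload));
	e.crc = demo_csum(&e);
	printf("check: %d (0 = consistent)\n", demo_check_emeta(&e));
	return 0;
}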
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
@@ -137,7 +137,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
u64 nr_valid_lbas, nr_lbas = 0;
u64 i;
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list)
return 1;
@@ -149,7 +149,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
struct ppa_addr ppa;
int pos;
- ppa = addr_to_pblk_ppa(pblk, i, line->id);
+ ppa = addr_to_gen_ppa(pblk, i, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
/* Do not update bad blocks */
@@ -188,7 +188,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
- nr_bb * geo->sec_per_blk;
+ nr_bb * geo->sec_per_chk;
}
struct pblk_recov_alloc {
@@ -263,12 +263,12 @@ next_read_rq:
int pos;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
r_ptr_int += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
@@ -288,7 +288,7 @@ next_read_rq:
/* At this point, the read should not fail. If it does, it is a problem
* we cannot recover from here. Need FTL log.
*/
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
return -EINTR;
}
@@ -411,12 +411,12 @@ next_pad_rq:
int pos;
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
}
@@ -541,12 +541,12 @@ next_rq:
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
@@ -672,12 +672,12 @@ next_rq:
paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
paddr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
@@ -817,7 +817,7 @@ static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
while (emeta_secs) {
emeta_start--;
- ppa = addr_to_pblk_ppa(pblk, emeta_start, line->id);
+ ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
if (!test_bit(pos, line->blk_bitmap))
emeta_secs--;
@@ -938,6 +938,11 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
goto next;
}
+ if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
+ pblk_recov_l2p_from_oob(pblk, line);
+ goto next;
+ }
+
if (pblk_recov_l2p_from_emeta(pblk, line))
pblk_recov_l2p_from_oob(pblk, line);
@@ -984,10 +989,8 @@ next:
}
spin_unlock(&l_mg->free_lock);
- if (is_next) {
+ if (is_next)
pblk_line_erase(pblk, l_mg->data_next);
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
- }
out:
if (found_lines != recovered_lines)
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index dacc71922260..0d457b162f23 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -89,17 +89,15 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
return atomic_read(&rl->free_blocks);
}
-/*
- * We check for (i) the number of free blocks in the current LUN and (ii) the
- * total number of free blocks in the pblk instance. This is to even out the
- * number of free blocks on each LUN when GC kicks in.
- *
- * Only the total number of free blocks is used to configure the rate limiter.
- */
-void pblk_rl_update_rates(struct pblk_rl *rl)
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
+{
+ return atomic_read(&rl->free_user_blocks);
+}
+
+static void __pblk_rl_update_rates(struct pblk_rl *rl,
+ unsigned long free_blocks)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
- unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
int max = rl->rb_budget;
if (free_blocks >= rl->high) {
@@ -132,20 +130,37 @@ void pblk_rl_update_rates(struct pblk_rl *rl)
pblk_gc_should_stop(pblk);
}
+void pblk_rl_update_rates(struct pblk_rl *rl)
+{
+ __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
+}
+
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_add(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+ free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_sub(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+
+ if (used)
+ free_blocks = atomic_sub_return(blk_in_line,
+ &rl->free_user_blocks);
+ else
+ free_blocks = atomic_read(&rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -174,16 +189,21 @@ void pblk_rl_free(struct pblk_rl *rl)
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
+ int sec_meta, blk_meta;
+
unsigned int rb_windows;
- rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
- rl->high_pw = get_count_order(rl->high);
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
- rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
- if (rl->low < min_blocks)
- rl->low = min_blocks;
+ rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
+ rl->high_pw = get_count_order(rl->high);
rl->rsv_blocks = min_blocks;
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index cd49e8875d4e..620bab853579 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -28,7 +28,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
ssize_t sz = 0;
int i;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
int active = 1;
rlun = &pblk->luns[i];
@@ -49,11 +49,12 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
{
- int free_blocks, total_blocks;
+ int free_blocks, free_user_blocks, total_blocks;
int rb_user_max, rb_user_cnt;
int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
- free_blocks = atomic_read(&pblk->rl.free_blocks);
+ free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
+ free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
rb_user_max = pblk->rl.rb_user_max;
rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
rb_gc_max = pblk->rl.rb_gc_max;
@@ -64,16 +65,16 @@ static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
total_blocks = pblk->rl.total_blocks;
return snprintf(page, PAGE_SIZE,
- "u:%u/%u,gc:%u/%u(%u/%u)(stop:<%u,full:>%u,free:%d/%d)-%d\n",
+ "u:%u/%u,gc:%u/%u(%u)(stop:<%u,full:>%u,free:%d/%d/%d)-%d\n",
rb_user_cnt,
rb_user_max,
rb_gc_cnt,
rb_gc_max,
rb_state,
rb_budget,
- pblk->rl.low,
pblk->rl.high,
free_blocks,
+ free_user_blocks,
total_blocks,
READ_ONCE(pblk->rl.rb_user_active));
}
@@ -238,7 +239,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sz = snprintf(page, PAGE_SIZE - sz,
"line: nluns:%d, nblks:%d, nsecs:%d\n",
- geo->nr_luns, lm->blk_per_line, lm->sec_per_line);
+ geo->all_luns, lm->blk_per_line, lm->sec_per_line);
sz += snprintf(page + sz, PAGE_SIZE - sz,
"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
@@ -287,7 +288,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
"blk_line:%d, sec_line:%d, sec_blk:%d\n",
lm->blk_per_line,
lm->sec_per_line,
- geo->sec_per_blk);
+ geo->sec_per_chk);
return sz;
}
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6c1cafafef53..aae86ed60b98 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -21,13 +21,28 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
{
struct bio *original_bio;
+ struct pblk_rb *rwb = &pblk->rwb;
unsigned long ret;
int i;
for (i = 0; i < c_ctx->nr_valid; i++) {
struct pblk_w_ctx *w_ctx;
+ int pos = c_ctx->sentry + i;
+ int flags;
+
+ w_ctx = pblk_rb_w_ctx(rwb, pos);
+ flags = READ_ONCE(w_ctx->flags);
+
+ if (flags & PBLK_FLUSH_ENTRY) {
+ flags &= ~PBLK_FLUSH_ENTRY;
+ /* Release flags on context. Protect from writes */
+ smp_store_release(&w_ctx->flags, flags);
+
+#ifdef CONFIG_NVM_DEBUG
+ atomic_dec(&rwb->inflight_flush_point);
+#endif
+ }
- w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
while ((original_bio = bio_list_pop(&w_ctx->bios)))
bio_endio(original_bio);
}
@@ -439,7 +454,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
struct pblk_line *meta_line;
int err;
- ppa_set_empty(&erase_ppa);
+ pblk_ppa_set_empty(&erase_ppa);
/* Assign lbas to ppas and populate request structure */
err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
@@ -457,7 +472,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
}
- if (!ppa_empty(erase_ppa)) {
+ if (!pblk_ppa_empty(erase_ppa)) {
/* Submit erase for next data line */
if (pblk_blk_erase_async(pblk, erase_ppa)) {
struct pblk_line *e_line = pblk_line_get_erase(pblk);
@@ -508,7 +523,7 @@ static int pblk_submit_write(struct pblk *pblk)
if (!secs_avail)
return 1;
- secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
+ secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
return 1;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 59a64d461a5d..8c357fb6538e 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -51,17 +51,16 @@
#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
-#define pblk_for_each_lun(pblk, rlun, i) \
- for ((i) = 0, rlun = &(pblk)->luns[0]; \
- (i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
-
/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)
+#define PBLK_DEFAULT_OP (11)
+
enum {
PBLK_READ = READ,
PBLK_WRITE = WRITE,/* Write from write buffer */
PBLK_WRITE_INT, /* Internal write - no write buffer */
+ PBLK_READ_RECOV, /* Recovery read - errors allowed */
PBLK_ERASE,
};
@@ -114,6 +113,7 @@ struct pblk_c_ctx {
/* read context */
struct pblk_g_ctx {
void *private;
+ unsigned long start_time;
u64 lba;
};
@@ -170,7 +170,7 @@ struct pblk_rb {
* the last submitted entry that has
* been successfully persisted to media
*/
- unsigned int sync_point; /* Sync point - last entry that must be
+ unsigned int flush_point; /* Flush point - last entry that must be
* flushed to the media. Used with
* REQ_FLUSH and REQ_FUA
*/
@@ -193,7 +193,7 @@ struct pblk_rb {
spinlock_t s_lock; /* Sync lock */
#ifdef CONFIG_NVM_DEBUG
- atomic_t inflight_sync_point; /* Not served REQ_FLUSH | REQ_FUA */
+ atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
@@ -256,9 +256,6 @@ struct pblk_rl {
unsigned int high; /* Upper threshold for rate limiter (free run -
* user I/O rate limiter
*/
- unsigned int low; /* Lower threshold for rate limiter (user I/O
- * rate limiter - stall)
- */
unsigned int high_pw; /* High rounded up as a power of 2 */
#define PBLK_USER_HIGH_THRS 8 /* Begin write limit at 12% available blks */
@@ -292,7 +289,9 @@ struct pblk_rl {
unsigned long long nr_secs;
unsigned long total_blocks;
- atomic_t free_blocks;
+
+ atomic_t free_blocks; /* Total number of free blocks (+ OP) */
+ atomic_t free_user_blocks; /* Number of user free blocks (no OP) */
};
#define PBLK_LINE_EMPTY (~0U)
@@ -583,7 +582,9 @@ struct pblk {
*/
sector_t capacity; /* Device capacity when bad blocks are subtracted */
- int over_pct; /* Percentage of device used for over-provisioning */
+
+ int op; /* Percentage of device used for over-provisioning */
+ int op_blks; /* Number of blocks used for over-provisioning */
/* pblk provisioning values. Used by rate limiter */
struct pblk_rl rl;
@@ -691,7 +692,7 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
@@ -812,7 +813,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
struct pblk_rec_ctx *recovery, u64 *comp_bits,
unsigned int comp);
@@ -843,6 +844,7 @@ void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
@@ -851,7 +853,8 @@ void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
@@ -907,28 +910,47 @@ static inline int pblk_pad_distance(struct pblk *pblk)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
+ return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
}
-static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_line(struct ppa_addr p)
{
return p.g.blk;
}
-static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
- return p.g.blk;
+ return p.g.lun * geo->nr_chnls + p.g.ch;
}
-static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
+ u64 line_id)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ struct ppa_addr ppa;
+
+ ppa.ppa = 0;
+ ppa.g.blk = line_id;
+ ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
+ ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
+ ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
+ ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
+ ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
+
+ return ppa;
}
-/* A block within a line corresponds to the lun */
-static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
+ struct ppa_addr p)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ u64 paddr;
+
+ paddr = (u64)p.g.pg << pblk->ppaf.pg_offset;
+ paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
+ paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
+ paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
+ paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+
+ return paddr;
}
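/*
 * Illustrative sketch, not part of the patch: round-tripping a line
 * address through mask/shift pairs like the addr_to_gen_ppa() and
 * pblk_dev_ppa_to_line_addr() helpers above. The field offsets and
 * widths are invented; the real ones come from the device's ppa
 * format.
 */
#include <stdio.h>
#include <stdint.h>

/* assumed layout: sec[0:1] pl[2:3] ch[4:6] lun[7:8] pg[9:16] */
#define SEC_OFF 0
#define PL_OFF  2
#define CH_OFF  4
#define LUN_OFF 7
#define PG_OFF  9

int main(void)
{
	uint64_t paddr = (3ULL << PG_OFF) | (1ULL << LUN_OFF) |
			 (5ULL << CH_OFF) | (1ULL << PL_OFF) | 2;

	unsigned pg  = (paddr >> PG_OFF)  & 0xFF;
	unsigned lun = (paddr >> LUN_OFF) & 0x3;
	unsigned ch  = (paddr >> CH_OFF)  & 0x7;
	unsigned pl  = (paddr >> PL_OFF)  & 0x3;
	unsigned sec = paddr & 0x3;

	uint64_t back = ((uint64_t)pg << PG_OFF) | ((uint64_t)lun << LUN_OFF) |
			((uint64_t)ch << CH_OFF) | ((uint64_t)pl << PL_OFF) |
			sec;

	printf("pg=%u lun=%u ch=%u pl=%u sec=%u round-trip %s\n",
	       pg, lun, ch, pl, sec, back == paddr ? "ok" : "broken");
	return 0;
}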
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
@@ -960,24 +982,6 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
return ppa64;
}
-static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
- sector_t lba)
-{
- struct ppa_addr ppa;
-
- if (pblk->ppaf_bitsize < 32) {
- u32 *map = (u32 *)pblk->trans_map;
-
- ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
- } else {
- struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
-
- ppa = map[lba];
- }
-
- return ppa;
-}
-
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
u32 ppa32 = 0;
@@ -999,33 +1003,36 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
return ppa32;
}
-static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
- struct ppa_addr ppa)
+static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
+ sector_t lba)
{
+ struct ppa_addr ppa;
+
if (pblk->ppaf_bitsize < 32) {
u32 *map = (u32 *)pblk->trans_map;
- map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
} else {
- u64 *map = (u64 *)pblk->trans_map;
+ struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
- map[lba] = ppa.ppa;
+ ppa = map[lba];
}
+
+ return ppa;
}
-static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
- struct ppa_addr p)
+static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
+ struct ppa_addr ppa)
{
- u64 paddr;
+ if (pblk->ppaf_bitsize < 32) {
+ u32 *map = (u32 *)pblk->trans_map;
- paddr = 0;
- paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
- paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
- paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
- paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
- paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+ map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ } else {
+ u64 *map = (u64 *)pblk->trans_map;
- return paddr;
+ map[lba] = ppa.ppa;
+ }
}
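/*
 * Illustrative sketch, not part of the patch: why pblk keeps the two
 * trans_map layouts handled by pblk_trans_map_get()/_set() above.
 * When the ppa format fits in 32 bits the L2P table stores u32
 * entries and halves its footprint; the capacity below is invented.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long nr_secs = 100L * 1000 * 1000;	/* mapped sectors */
	int ppaf_bitsize = 28;			/* bits used by the format */

	size_t entry = (ppaf_bitsize < 32) ? sizeof(uint32_t)
					   : sizeof(uint64_t);

	printf("L2P table: %ld entries x %zu B = %ld MiB\n",
	       nr_secs, entry, (nr_secs * (long)entry) >> 20);
	return 0;
}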
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
@@ -1040,10 +1047,7 @@ static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
- if (lppa.ppa == rppa.ppa)
- return true;
-
- return false;
+ return (lppa.ppa == rppa.ppa);
}
static inline int pblk_addr_in_cache(struct ppa_addr ppa)
@@ -1066,32 +1070,6 @@ static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
return p;
}
-static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.blk = line_id;
- ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
- ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
- ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
- ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
- ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
-
- return ppa;
-}
-
-static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-
- return ppa;
-}
-
static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
struct line_header *header)
{
@@ -1212,10 +1190,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
if (!ppa->c.is_cached &&
ppa->g.ch < geo->nr_chnls &&
- ppa->g.lun < geo->luns_per_chnl &&
+ ppa->g.lun < geo->nr_luns &&
ppa->g.pl < geo->nr_planes &&
- ppa->g.blk < geo->blks_per_lun &&
- ppa->g.pg < geo->pgs_per_blk &&
+ ppa->g.blk < geo->nr_chks &&
+ ppa->g.pg < geo->ws_per_chk &&
ppa->g.sec < geo->sec_per_pg)
continue;
@@ -1245,7 +1223,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
for (i = 0; i < rqd->nr_ppas; i++) {
ppa = ppa_list[i];
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
@@ -1288,11 +1266,6 @@ static inline unsigned int pblk_get_secs(struct bio *bio)
return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
-static inline sector_t pblk_get_sector(sector_t lba)
-{
- return lba * NR_PHY_IN_LOG;
-}
-
static inline void pblk_setup_uuid(struct pblk *pblk)
{
uuid_le uuid;
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
deleted file mode 100644
index 0993c14be860..000000000000
--- a/drivers/lightnvm/rrpc.c
+++ /dev/null
@@ -1,1625 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#include "rrpc.h"
-
-static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
-static DECLARE_RWSEM(rrpc_lock);
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags);
-
-#define rrpc_for_each_lun(rrpc, rlun, i) \
- for ((i) = 0, rlun = &(rrpc)->luns[0]; \
- (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
-
-static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
-
- lockdep_assert_held(&rrpc->rev_lock);
-
- if (a->addr == ADDR_EMPTY || !rblk)
- return;
-
- spin_lock(&rblk->lock);
-
- div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
- WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
- rblk->nr_invalid_pages++;
-
- spin_unlock(&rblk->lock);
-
- rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
-}
-
-static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned int len)
-{
- sector_t i;
-
- spin_lock(&rrpc->rev_lock);
- for (i = slba; i < slba + len; i++) {
- struct rrpc_addr *gp = &rrpc->trans_map[i];
-
- rrpc_page_invalidate(rrpc, gp);
- gp->rblk = NULL;
- }
- spin_unlock(&rrpc->rev_lock);
-}
-
-static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
- sector_t laddr, unsigned int pages)
-{
- struct nvm_rq *rqd;
- struct rrpc_inflight_rq *inf;
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
- if (!rqd)
- return ERR_PTR(-ENOMEM);
-
- inf = rrpc_get_inflight_rq(rqd);
- if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
- mempool_free(rqd, rrpc->rq_pool);
- return NULL;
- }
-
- return rqd;
-}
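
Note the dual failure convention above: rrpc_inflight_laddr_acquire() returns ERR_PTR(-ENOMEM) when the mempool allocation itself fails, but NULL when the logical range is merely locked by another in-flight request. Callers must therefore tell a fatal error from a transient one; a minimal caller sketch, following what rrpc_discard() below does:

        rqd = rrpc_inflight_laddr_acquire(rrpc, laddr, pages);
        if (IS_ERR(rqd))
                return PTR_ERR(rqd);  /* allocation failure: fatal */
        if (!rqd) {
                schedule();           /* range busy: back off and retry */
                goto retry;
        }
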
-
-static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
-
- rrpc_unlock_laddr(rrpc, inf);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
-{
- sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
- sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
- struct nvm_rq *rqd;
-
- while (1) {
- rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
- if (rqd)
- break;
-
- schedule();
- }
-
- if (IS_ERR(rqd)) {
- pr_err("rrpc: unable to acquire inflight IO\n");
- bio_io_error(bio);
- return;
- }
-
- rrpc_invalidate_range(rrpc, slba, len);
- rrpc_inflight_laddr_release(rrpc, rqd);
-}
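
The divisions above convert the bio's 512-byte view into rrpc's 4 KiB logical pages (NR_PHY_IN_LOG is 8 and RRPC_EXPOSED_PAGE_SIZE is 4096, per rrpc.h). A worked example with assumed bio values:

        /* Assumed: bi_sector == 2048 (512 B units), bi_size == 16384. */
        sector_t slba = 2048 / NR_PHY_IN_LOG;            /* page 256 */
        sector_t len  = 16384 / RRPC_EXPOSED_PAGE_SIZE;  /* 4 pages  */
        /* The discard therefore invalidates logical pages 256..259. */
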
-
-static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- return (rblk->next_page == dev->geo.sec_per_blk);
-}
-
-/* Calculate relative addr for the given block, considering instantiated LUNs */
-static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return rlun->id * dev->geo.sec_per_blk;
-}
-
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
- struct rrpc_addr *gp)
-{
- struct rrpc_block *rblk = gp->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- u64 addr = gp->addr;
- struct ppa_addr paddr;
-
- paddr.ppa = addr;
- paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
- paddr.g.ch = rlun->bppa.g.ch;
- paddr.g.lun = rlun->bppa.g.lun;
- paddr.g.blk = rblk->id;
-
- return paddr;
-}
-
-/* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
- struct rrpc_block **cur_rblk)
-{
- struct rrpc *rrpc = rlun->rrpc;
-
- if (*cur_rblk) {
- spin_lock(&(*cur_rblk)->lock);
- WARN_ON(!block_is_full(rrpc, *cur_rblk));
- spin_unlock(&(*cur_rblk)->lock);
- }
- *cur_rblk = new_rblk;
-}
-
-static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
- struct rrpc_lun *rlun)
-{
- struct rrpc_block *rblk = NULL;
-
- if (list_empty(&rlun->free_list))
- goto out;
-
- rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
-
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
-
-out:
- return rblk;
-}
-
-static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
- unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&rlun->lock);
- if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
- pr_err("nvm: rrpc: cannot give block to a non-GC request\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
-
- rblk = __rrpc_get_blk(rrpc, rlun);
- if (!rblk) {
- pr_err("nvm: rrpc: cannot get new block\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
- spin_unlock(&rlun->lock);
-
- bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
- rblk->next_page = 0;
- rblk->nr_invalid_pages = 0;
- atomic_set(&rblk->data_cmnt_size, 0);
-
- return rblk;
-}
-
-static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- if (rblk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&rblk->list, &rlun->free_list);
- rlun->nr_free_blocks++;
- rblk->state = NVM_BLK_ST_FREE;
- } else if (rblk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk:%d -> %u)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id, rblk->state);
- list_move_tail(&rblk->list, &rlun->bb_list);
- }
- spin_unlock(&rlun->lock);
-}
-
-static void rrpc_put_blks(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- if (rlun->cur)
- rrpc_put_blk(rrpc, rlun->cur);
- if (rlun->gc_cur)
- rrpc_put_blk(rrpc, rlun->gc_cur);
- }
-}
-
-static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
-{
- int next = atomic_inc_return(&rrpc->next_lun);
-
- return &rrpc->luns[next % rrpc->nr_luns];
-}
-
-static void rrpc_gc_kick(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- unsigned int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- queue_work(rrpc->krqd_wq, &rlun->ws_gc);
- }
-}
-
-/*
- * Timed GC: kick every LUN's GC worker at a fixed interval.
- */
-static void rrpc_gc_timer(struct timer_list *t)
-{
- struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
-
- rrpc_gc_kick(rrpc);
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void rrpc_end_sync_bio(struct bio *bio)
-{
- struct completion *waiting = bio->bi_private;
-
- if (bio->bi_status)
- pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
-
- complete(waiting);
-}
-
-/*
- * rrpc_move_valid_pages -- migrate live data off the block
- * @rrpc: the 'rrpc' structure
- * @block: the block from which to migrate live pages
- *
- * Description:
- * GC algorithms may call this function to migrate remaining live
- * pages off the block prior to erasing it. This function blocks
- * further execution until the operation is complete.
- */
-static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct request_queue *q = dev->q;
- struct rrpc_rev_addr *rev;
- struct nvm_rq *rqd;
- struct bio *bio;
- struct page *page;
- int slot;
- int nr_sec_per_blk = dev->geo.sec_per_blk;
- u64 phys_addr;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
- return 0;
-
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- pr_err("nvm: could not alloc bio to gc\n");
- return -ENOMEM;
- }
-
- page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
-
- while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_sec_per_blk)) < nr_sec_per_blk) {
-
- /* Lock laddr */
- phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
-
-try:
- spin_lock(&rrpc->rev_lock);
- /* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr];
- /* already updated by previous regular write */
- if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
- continue;
- }
-
- rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
- if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
- schedule();
- goto try;
- }
-
- spin_unlock(&rrpc->rev_lock);
-
- /* Perform read to do GC */
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc read failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
- if (bio->bi_status) {
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
-
- bio_reset(bio);
- reinit_completion(&wait);
-
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- /* turn the command around and write the data back to a new
- * address
- */
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc write failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
-
- rrpc_inflight_laddr_release(rrpc, rqd);
- if (bio->bi_status)
- goto finished;
-
- bio_reset(bio);
- }
-
-finished:
- mempool_free(page, rrpc->page_pool);
- bio_put(bio);
-
- if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
- pr_err("nvm: failed to garbage collect block\n");
- return -EIO;
- }
-
- return 0;
-}
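
rrpc_move_valid_pages() leans on the stock synchronous-bio idiom: an on-stack completion is wired into the bio, the end_io callback completes it, and the submitter blocks in wait_for_completion_io(). Stripped of the rrpc details, the pattern is (a sketch; submit_bio() stands in for rrpc_submit_io()):

        static void sync_end_io(struct bio *bio)
        {
                complete(bio->bi_private);  /* wake the submitter */
        }

        /* ...in the submitting function: */
        DECLARE_COMPLETION_ONSTACK(wait);

        bio->bi_private = &wait;
        bio->bi_end_io  = sync_end_io;
        submit_bio(bio);
        wait_for_completion_io(&wait);
        /* bio->bi_status now holds the outcome */
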
-
-static void rrpc_block_gc(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- struct ppa_addr ppa;
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- if (rrpc_move_valid_pages(rrpc, rblk))
- goto put_back;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
- ppa.g.blk = rblk->id;
-
- if (nvm_erase_sync(rrpc->dev, &ppa, 1))
- goto put_back;
-
- rrpc_put_blk(rrpc, rblk);
-
- return;
-
-put_back:
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-}
-
-/* the block with the highest number of invalid pages will be at the
- * beginning of the list
- */
-static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
- struct rrpc_block *rb)
-{
- if (ra->nr_invalid_pages == rb->nr_invalid_pages)
- return ra;
-
- return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
-}
-
-/* linearly find the block with the highest number of invalid pages;
- * requires lun->lock
- */
-static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
-{
- struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblk, *max;
-
- BUG_ON(list_empty(prio_list));
-
- max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblk, prio_list, prio)
- max = rblk_max_invalid(max, rblk);
-
- return max;
-}
-
-static void rrpc_lun_gc(struct work_struct *work)
-{
- struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
- struct rrpc *rrpc = rlun->rrpc;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block_gc *gcb;
- unsigned int nr_blocks_need;
-
- nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
-
- if (nr_blocks_need < rrpc->nr_luns)
- nr_blocks_need = rrpc->nr_luns;
-
- spin_lock(&rlun->lock);
- while (nr_blocks_need > rlun->nr_free_blocks &&
- !list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblk = block_prio_find_max(rlun);
-
- if (!rblk->nr_invalid_pages)
- break;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
- list_del_init(&rblk->prio);
-
- WARN_ON(!block_is_full(rrpc, rblk));
-
- pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
- INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
-
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-
- nr_blocks_need--;
- }
- spin_unlock(&rlun->lock);
-
- /* TODO: Hint that request queue can be started again */
-}
-
-static void rrpc_gc_queue(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-}
-
-static const struct block_device_operations rrpc_fops = {
- .owner = THIS_MODULE,
-};
-
-static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
-{
- unsigned int i;
- struct rrpc_lun *rlun, *max_free;
-
- if (!is_gc)
- return get_next_lun(rrpc);
-
- /* during GC, we don't care about RR; instead, we want to
- * maintain evenness between the block LUNs.
- */
- max_free = &rrpc->luns[0];
- /* prevent the GC-ing LUN from devouring pages of a LUN with
- * few free blocks. We don't take the lock, as we only need an
- * estimate.
- */
- rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->nr_free_blocks > max_free->nr_free_blocks)
- max_free = rlun;
- }
-
- return max_free;
-}
-
-static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
- struct rrpc_block *rblk, u64 paddr)
-{
- struct rrpc_addr *gp;
- struct rrpc_rev_addr *rev;
-
- BUG_ON(laddr >= rrpc->nr_sects);
-
- gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
- if (gp->rblk)
- rrpc_page_invalidate(rrpc, gp);
-
- gp->addr = paddr;
- gp->rblk = rblk;
-
- rev = &rrpc->rev_trans_map[gp->addr];
- rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
-
- return gp;
-}
-
-static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- u64 addr = ADDR_EMPTY;
-
- spin_lock(&rblk->lock);
- if (block_is_full(rrpc, rblk))
- goto out;
-
- addr = rblk->next_page;
-
- rblk->next_page++;
-out:
- spin_unlock(&rblk->lock);
- return addr;
-}
-
-/* Map a logical address to a physical page. The mapping implements a
- * round-robin approach and allocates a page from the next available LUN.
- *
- * Returns a ppa_addr holding the physical address, or one set to
- * ADDR_EMPTY if no blocks in the next rlun are available.
- */
-static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
- int is_gc)
-{
- struct nvm_tgt_dev *tgt_dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk, **cur_rblk;
- struct rrpc_addr *p;
- struct ppa_addr ppa;
- u64 paddr;
- int gc_force = 0;
-
- ppa.ppa = ADDR_EMPTY;
- rlun = rrpc_get_lun_rr(rrpc, is_gc);
-
- if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
- return ppa;
-
- /*
- * page allocation steps:
- * 1. Try to allocate a new page from the current rblk.
- * 2a. If that succeeds, proceed to map it in and return.
- * 2b. If it fails, first try to allocate a new block from the media
- *     manager, then retry step 1. Retry until the normal block pool
- *     is exhausted.
- * 3. If exhausted, and the garbage collector is requesting the block,
- *    go to the reserved block and retry step 1.
- *    If this fails as well, or GC is not the requester, report that
- *    no block could be retrieved and let the caller handle further
- *    processing.
- */
-
- spin_lock(&rlun->lock);
- cur_rblk = &rlun->cur;
- rblk = rlun->cur;
-retry:
- paddr = rrpc_alloc_addr(rrpc, rblk);
-
- if (paddr != ADDR_EMPTY)
- goto done;
-
- if (!list_empty(&rlun->wblk_list)) {
-new_blk:
- rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
- prio);
- rrpc_set_lun_cur(rlun, rblk, cur_rblk);
- list_del(&rblk->prio);
- goto retry;
- }
- spin_unlock(&rlun->lock);
-
- rblk = rrpc_get_blk(rrpc, rlun, gc_force);
- if (rblk) {
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->wblk_list);
- /*
- * another thread might already have added a new block;
- * therefore, make sure that one is used instead of the
- * one just added.
- */
- goto new_blk;
- }
-
- if (unlikely(is_gc) && !gc_force) {
- /* retry from emergency gc block */
- cur_rblk = &rlun->gc_cur;
- rblk = rlun->gc_cur;
- gc_force = 1;
- spin_lock(&rlun->lock);
- goto retry;
- }
-
- pr_err("rrpc: failed to allocate new block\n");
- return ppa;
-done:
- spin_unlock(&rlun->lock);
- p = rrpc_update_map(rrpc, laddr, rblk, paddr);
- if (!p)
- return ppa;
-
- /* return global address */
- return rrpc_ppa_to_gaddr(tgt_dev, p);
-}
-
-static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_block_gc *gcb;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb) {
- pr_err("rrpc: unable to queue block for gc.\n");
- return;
- }
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
-
- INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-}
-
-static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
-{
- struct rrpc_lun *rlun = NULL;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
- rrpc->luns[i].bppa.g.lun == p.g.lun) {
- rlun = &rrpc->luns[i];
- break;
- }
- }
-
- return rlun;
-}
-
-static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
-
- rlun = rrpc_ppa_to_lun(rrpc, ppa);
- rblk = &rlun->blocks[ppa.g.blk];
- rblk->state = NVM_BLK_ST_BAD;
-
- nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
-}
-
-static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- void *comp_bits = &rqd->ppa_status;
- struct ppa_addr ppa, prev_ppa;
- int nr_ppas = rqd->nr_ppas;
- int bit;
-
- if (rqd->nr_ppas == 1)
- __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
-
- ppa_set_empty(&prev_ppa);
- bit = -1;
- while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
- ppa = rqd->ppa_list[bit];
- if (ppa_cmp_blk(ppa, prev_ppa))
- continue;
-
- __rrpc_mark_bad_block(rrpc, ppa);
- }
-}
-
-static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
- sector_t laddr, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *p;
- struct rrpc_block *rblk;
- int cmnt_size, i;
-
- for (i = 0; i < npages; i++) {
- p = &rrpc->trans_map[laddr + i];
- rblk = p->rblk;
-
- cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == dev->geo.sec_per_blk))
- rrpc_run_gc(rrpc, rblk);
- }
-}
-
-static void rrpc_end_io(struct nvm_rq *rqd)
-{
- struct rrpc *rrpc = rqd->private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_ppas;
- sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
-
- if (bio_data_dir(rqd->bio) == WRITE) {
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- rrpc_mark_bad_block(rrpc, rqd);
-
- rrpc_end_io_write(rrpc, rrqd, laddr, npages);
- }
-
- bio_put(rqd->bio);
-
- if (rrqd->flags & NVM_IOTYPE_GC)
- return;
-
- rrpc_unlock_rq(rrpc, rqd);
-
- if (npages > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *gp;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr + i];
-
- if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- return NVM_IO_DONE;
- }
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
- unsigned long flags)
-{
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
- struct rrpc_addr *gp;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- BUG_ON(!(laddr < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr];
-
- if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- return NVM_IO_DONE;
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct ppa_addr p;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_list[i] = p;
- }
-
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct ppa_addr p;
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- p = rrpc_map_page(rrpc, laddr, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_addr = p;
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("rrpc: not able to allocate ppa list\n");
- return NVM_IO_ERR;
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
- npages);
-
- return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_rq(rrpc, bio, rqd, flags);
-
- return rrpc_read_rq(rrpc, bio, rqd, flags);
-}
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
- uint8_t nr_pages = rrpc_get_pages(bio);
- int bio_size = bio_sectors(bio) << 9;
- int err;
-
- if (bio_size < dev->geo.sec_size)
- return NVM_IO_ERR;
- else if (bio_size > dev->geo.max_rq_size)
- return NVM_IO_ERR;
-
- err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
- if (err)
- return err;
-
- bio_get(bio);
- rqd->bio = bio;
- rqd->private = rrpc;
- rqd->nr_ppas = nr_pages;
- rqd->end_io = rrpc_end_io;
- rrq->flags = flags;
-
- err = nvm_submit_io(dev, rqd);
- if (err) {
- pr_err("rrpc: I/O submission failed: %d\n", err);
- bio_put(bio);
- if (!(flags & NVM_IOTYPE_GC)) {
- rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- }
- return NVM_IO_ERR;
- }
-
- return NVM_IO_OK;
-}
-
-static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
-{
- struct rrpc *rrpc = q->queuedata;
- struct nvm_rq *rqd;
- int err;
-
- blk_queue_split(q, &bio);
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- rrpc_discard(rrpc, bio);
- return BLK_QC_T_NONE;
- }
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
- memset(rqd, 0, sizeof(struct nvm_rq));
-
- err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
- switch (err) {
- case NVM_IO_OK:
- return BLK_QC_T_NONE;
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
- case NVM_IO_REQUEUE:
- spin_lock(&rrpc->bio_lock);
- bio_list_add(&rrpc->requeue_bios, bio);
- spin_unlock(&rrpc->bio_lock);
- queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
- break;
- }
-
- mempool_free(rqd, rrpc->rq_pool);
- return BLK_QC_T_NONE;
-}
-
-static void rrpc_requeue(struct work_struct *work)
-{
- struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock(&rrpc->bio_lock);
- bio_list_merge(&bios, &rrpc->requeue_bios);
- bio_list_init(&rrpc->requeue_bios);
- spin_unlock(&rrpc->bio_lock);
-
- while ((bio = bio_list_pop(&bios)))
- rrpc_make_rq(rrpc->disk->queue, bio);
-}
-
-static void rrpc_gc_free(struct rrpc *rrpc)
-{
- if (rrpc->krqd_wq)
- destroy_workqueue(rrpc->krqd_wq);
-
- if (rrpc->kgc_wq)
- destroy_workqueue(rrpc->kgc_wq);
-}
-
-static int rrpc_gc_init(struct rrpc *rrpc)
-{
- rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
- rrpc->nr_luns);
- if (!rrpc->krqd_wq)
- return -ENOMEM;
-
- rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
- if (!rrpc->kgc_wq)
- return -ENOMEM;
-
- timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
-
- return 0;
-}
-
-static void rrpc_map_free(struct rrpc *rrpc)
-{
- vfree(rrpc->rev_trans_map);
- vfree(rrpc->trans_map);
-}
-
-static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- u64 i;
-
- for (i = 0; i < nlb; i++) {
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- unsigned int mod;
-
- /* LNVM treats address spaces as silos; LBA and PBA are
- * equally large and zero-indexed.
- */
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("nvm: L2P data entry is out of bounds!\n");
- pr_err("nvm: Maybe loaded an old target L2P\n");
- return -EINVAL;
- }
-
- /* Address zero is special: the first page on a disk is
- * protected, as it often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
-
- div_u64_rem(pba, rrpc->nr_sects, &mod);
-
- gaddr = rrpc_recov_addr(dev, pba);
- rlun = rrpc_ppa_to_lun(rrpc, gaddr);
- if (!rlun) {
- pr_err("rrpc: l2p corruption on lba %llu\n",
- slba + i);
- return -EINVAL;
- }
-
- rblk = &rlun->blocks[gaddr.g.blk];
- if (!rblk->state) {
- /* at this point, we don't know anything about the
- * block. It's up to the FTL on top to re-establish the
- * block state. The block is assumed to be open.
- */
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
- }
-
- addr[i].addr = pba;
- addr[i].rblk = rblk;
- raddr[mod].addr = slba + i;
- }
-
- return 0;
-}
-
-static int rrpc_map_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t i;
- int ret;
-
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
- if (!rrpc->trans_map)
- return -ENOMEM;
-
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_sects);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
-
- for (i = 0; i < rrpc->nr_sects; i++) {
- struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
-
- p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
- }
-
- /* Bring up the mapping table from device */
- ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not read L2P table.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
-#define ADDR_POOL_SIZE 64
-
-static int rrpc_core_init(struct rrpc *rrpc)
-{
- down_write(&rrpc_lock);
- if (!rrpc_gcb_cache) {
- rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
- sizeof(struct rrpc_block_gc), 0, 0, NULL);
- if (!rrpc_gcb_cache) {
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
-
- rrpc_rq_cache = kmem_cache_create("rrpc_rq",
- sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
- 0, 0, NULL);
- if (!rrpc_rq_cache) {
- kmem_cache_destroy(rrpc_gcb_cache);
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
- }
- up_write(&rrpc_lock);
-
- rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!rrpc->page_pool)
- return -ENOMEM;
-
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
- rrpc_gcb_cache);
- if (!rrpc->gcb_pool)
- return -ENOMEM;
-
- rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
- if (!rrpc->rq_pool)
- return -ENOMEM;
-
- spin_lock_init(&rrpc->inflights.lock);
- INIT_LIST_HEAD(&rrpc->inflights.reqs);
-
- return 0;
-}
-
-static void rrpc_core_free(struct rrpc *rrpc)
-{
- mempool_destroy(rrpc->page_pool);
- mempool_destroy(rrpc->gcb_pool);
- mempool_destroy(rrpc->rq_pool);
-}
-
-static void rrpc_luns_free(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- vfree(rlun->blocks);
- }
-
- kfree(rrpc->luns);
-}
-
-static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
-{
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_block *rblk;
- struct ppa_addr ppa;
- u8 *blks;
- int nr_blks;
- int i;
- int ret;
-
- if (!dev->parent->ops->get_bb_tbl)
- return 0;
-
- nr_blks = geo->blks_per_lun * geo->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
-
- ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
- if (ret) {
- pr_err("rrpc: could not get BB table\n");
- goto out;
- }
-
- nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
- if (nr_blks < 0) {
- ret = nr_blks;
- goto out;
- }
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_FREE)
- continue;
-
- rblk = &rlun->blocks[i];
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- rlun->nr_free_blocks--;
- }
-
-out:
- kfree(blks);
- return ret;
-}
-
-static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
-{
- rlun->bppa.ppa = 0;
- rlun->bppa.g.ch = ppa.g.ch;
- rlun->bppa.g.lun = ppa.g.lun;
-}
-
-static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun;
- int i, j, ret = -EINVAL;
-
- if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
- pr_err("rrpc: number of pages per block too high.\n");
- return -EINVAL;
- }
-
- spin_lock_init(&rrpc->rev_lock);
-
- rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
- GFP_KERNEL);
- if (!rrpc->luns)
- return -ENOMEM;
-
- /* 1:1 mapping */
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- rlun->id = i;
- rrpc_set_lun_ppa(rlun, luns[i]);
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- geo->blks_per_lun);
- if (!rlun->blocks) {
- ret = -ENOMEM;
- goto err;
- }
-
- INIT_LIST_HEAD(&rlun->free_list);
- INIT_LIST_HEAD(&rlun->used_list);
- INIT_LIST_HEAD(&rlun->bb_list);
-
- for (j = 0; j < geo->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
-
- rblk->id = j;
- rblk->rlun = rlun;
- rblk->state = NVM_BLK_T_FREE;
- INIT_LIST_HEAD(&rblk->prio);
- INIT_LIST_HEAD(&rblk->list);
- spin_lock_init(&rblk->lock);
-
- list_add_tail(&rblk->list, &rlun->free_list);
- }
-
- rlun->rrpc = rrpc;
- rlun->nr_free_blocks = geo->blks_per_lun;
- rlun->reserved_blocks = 2; /* for GC only */
-
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->wblk_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
-
- if (rrpc_bb_discovery(dev, rlun))
- goto err;
-
- }
-
- return 0;
-err:
- return ret;
-}
-
-/* returns 0 on success and stores the beginning address in *begin */
-static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t size = rrpc->nr_sects * dev->geo.sec_size;
- int ret;
-
- size >>= 9;
-
- ret = nvm_get_area(dev, begin, size);
- if (!ret)
- *begin >>= (ilog2(dev->geo.sec_size) - 9);
-
- return ret;
-}
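
The two shifts are unit conversions: nvm_get_area() works in 512-byte sectors, while rrpc addresses sec_size-sized sectors. With an assumed sec_size of 4096, ilog2(4096) - 9 == 3, so:

        size = nr_sects * 4096;  /* bytes                            */
        size >>= 9;              /* 512 B sectors for nvm_get_area() */
        *begin >>= 3;            /* 512 B sectors -> 4 KiB sectors   */
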
-
-static void rrpc_area_free(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
-
- nvm_put_area(dev, begin);
-}
-
-static void rrpc_free(struct rrpc *rrpc)
-{
- rrpc_gc_free(rrpc);
- rrpc_map_free(rrpc);
- rrpc_core_free(rrpc);
- rrpc_luns_free(rrpc);
- rrpc_area_free(rrpc);
-
- kfree(rrpc);
-}
-
-static void rrpc_exit(void *private)
-{
- struct rrpc *rrpc = private;
-
- del_timer(&rrpc->gc_timer);
-
- flush_workqueue(rrpc->krqd_wq);
- flush_workqueue(rrpc->kgc_wq);
-
- rrpc_free(rrpc);
-}
-
-static sector_t rrpc_capacity(void *private)
-{
- struct rrpc *rrpc = private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t reserved, provisioned;
-
- /* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
- provisioned = rrpc->nr_sects - reserved;
-
- if (reserved > rrpc->nr_sects) {
- pr_err("rrpc: not enough space available to expose storage.\n");
- return 0;
- }
-
- sector_div(provisioned, 10);
- return provisioned * 9 * NR_PHY_IN_LOG;
-}
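
Put differently, the target holds back four blocks per LUN (cur, gc_cur, and two emergency GC blocks), exposes 90% of the rest, and reports the result in 512-byte sectors. Worked through with assumed geometry:

        /* Assumed: 64 LUNs, 4096 sectors/block, 67,108,864 total sectors. */
        reserved    = 64 * 4096 * 4;        /*  1,048,576 sectors          */
        provisioned = 67108864 - 1048576;   /* 66,060,288 sectors          */
        provisioned /= 10;                  /*  6,606,028 (sector_div)     */
        return 6606028 * 9 * NR_PHY_IN_LOG; /* 475,634,016 512 B sectors   */
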
-
-/*
- * Look up the logical address in the reverse translation map and check
- * whether it is still valid by comparing the logical-to-physical mapping
- * with the physical address; pages whose forward mapping has moved are
- * marked invalid.
- */
-static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- int offset;
- struct rrpc_addr *laddr;
- u64 bpaddr, paddr, pladdr;
-
- bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
- paddr = bpaddr + offset;
-
- pladdr = rrpc->rev_trans_map[paddr].addr;
- if (pladdr == ADDR_EMPTY)
- continue;
-
- laddr = &rrpc->trans_map[pladdr];
-
- if (paddr == laddr->addr) {
- laddr->rblk = rblk;
- } else {
- set_bit(offset, rblk->invalid_pages);
- rblk->nr_invalid_pages++;
- }
- }
-}
-
-static int rrpc_blocks_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int lun_iter, blk_iter;
-
- for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
- rlun = &rrpc->luns[lun_iter];
-
- for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
- blk_iter++) {
- rblk = &rlun->blocks[blk_iter];
- rrpc_block_map_update(rrpc, rblk);
- }
- }
-
- return 0;
-}
-
-static int rrpc_luns_configure(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
-
- /* Emergency gc block */
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
- }
-
- return 0;
-err:
- rrpc_put_blks(rrpc);
- return -EINVAL;
-}
-
-static struct nvm_tgt_type tt_rrpc;
-
-static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
- int flags)
-{
- struct request_queue *bqueue = dev->q;
- struct request_queue *tqueue = tdisk->queue;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc *rrpc;
- sector_t soffset;
- int ret;
-
- if (!(dev->identity.dom & NVM_RSP_L2P)) {
- pr_err("nvm: rrpc: device does not support l2p (%x)\n",
- dev->identity.dom);
- return ERR_PTR(-EINVAL);
- }
-
- rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
- if (!rrpc)
- return ERR_PTR(-ENOMEM);
-
- rrpc->dev = dev;
- rrpc->disk = tdisk;
-
- bio_list_init(&rrpc->requeue_bios);
- spin_lock_init(&rrpc->bio_lock);
- INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
-
- rrpc->nr_luns = geo->nr_luns;
- rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
-
- /* simple round-robin strategy */
- atomic_set(&rrpc->next_lun, -1);
-
- ret = rrpc_area_init(rrpc, &soffset);
- if (ret < 0) {
- pr_err("nvm: rrpc: could not initialize area\n");
- return ERR_PTR(ret);
- }
- rrpc->soffset = soffset;
-
- ret = rrpc_luns_init(rrpc, dev->luns);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize luns\n");
- goto err;
- }
-
- ret = rrpc_core_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize core\n");
- goto err;
- }
-
- ret = rrpc_map_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize maps\n");
- goto err;
- }
-
- ret = rrpc_blocks_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize state for blocks\n");
- goto err;
- }
-
- ret = rrpc_luns_configure(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
- goto err;
- }
-
- ret = rrpc_gc_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize gc\n");
- goto err;
- }
-
- /* inherit the size from the underlying device */
- blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
- blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
-
- pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
-
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-
- return rrpc;
-err:
- rrpc_free(rrpc);
- return ERR_PTR(ret);
-}
-
-/* round robin, page-based FTL, and cost-based GC */
-static struct nvm_tgt_type tt_rrpc = {
- .name = "rrpc",
- .version = {1, 0, 0},
-
- .make_rq = rrpc_make_rq,
- .capacity = rrpc_capacity,
-
- .init = rrpc_init,
- .exit = rrpc_exit,
-};
-
-static int __init rrpc_module_init(void)
-{
- return nvm_register_tgt_type(&tt_rrpc);
-}
-
-static void rrpc_module_exit(void)
-{
- nvm_unregister_tgt_type(&tt_rrpc);
-}
-
-module_init(rrpc_module_init);
-module_exit(rrpc_module_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
deleted file mode 100644
index fdb6ff902903..000000000000
--- a/drivers/lightnvm/rrpc.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#ifndef RRPC_H_
-#define RRPC_H_
-
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-
-#include <linux/lightnvm.h>
-
-/* Only run GC if fewer than 1/X blocks are free */
-#define GC_LIMIT_INVERSE 10
-#define GC_TIME_SECS 100
-
-#define RRPC_SECTOR (512)
-#define RRPC_EXPOSED_PAGE_SIZE (4096)
-
-#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
-
-struct rrpc_inflight {
- struct list_head reqs;
- spinlock_t lock;
-};
-
-struct rrpc_inflight_rq {
- struct list_head list;
- sector_t l_start;
- sector_t l_end;
-};
-
-struct rrpc_rq {
- struct rrpc_inflight_rq inflight_rq;
- unsigned long flags;
-};
-
-struct rrpc_block {
- int id; /* id inside of LUN */
- struct rrpc_lun *rlun;
-
- struct list_head prio; /* LUN CG list */
- struct list_head list; /* LUN free, used, bb list */
-
-#define MAX_INVALID_PAGES_STORAGE 8
- /* Bitmap for invalid page entries */
- unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
- /* points to the next writable page within a block */
- unsigned int next_page;
- /* number of pages that are invalid, wrt host page size */
- unsigned int nr_invalid_pages;
-
- int state;
-
- spinlock_t lock;
- atomic_t data_cmnt_size; /* data pages committed to stable storage */
-};
-
-struct rrpc_lun {
- struct rrpc *rrpc;
-
- int id;
- struct ppa_addr bppa;
-
- struct rrpc_block *cur, *gc_cur;
- struct rrpc_block *blocks; /* Reference to block allocation */
-
- struct list_head prio_list; /* Blocks that may be GC'ed */
- struct list_head wblk_list; /* Queued blocks to be written to */
-
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
- struct list_head free_list; /* Unused blocks, i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
- unsigned int nr_free_blocks; /* Number of unused blocks */
-
- struct work_struct ws_gc;
-
- int reserved_blocks;
-
- spinlock_t lock;
-};
-
-struct rrpc {
- struct nvm_tgt_dev *dev;
- struct gendisk *disk;
-
- sector_t soffset; /* logical sector offset */
-
- int nr_luns;
- struct rrpc_lun *luns;
-
- /* calculated values */
- unsigned long long nr_sects;
-
- /* Write strategy variables. Move these into a separate structure for
- * each strategy.
- */
- atomic_t next_lun; /* Whenever a page is written, this is updated
- * to point to the next write lun
- */
-
- spinlock_t bio_lock;
- struct bio_list requeue_bios;
- struct work_struct ws_requeue;
-
- /* Simple translation map of logical addresses to physical addresses.
- * The logical addresses are known to the host system, while the
- * physical addresses are used when writing to the disk block device.
- */
- struct rrpc_addr *trans_map;
- /* also store a reverse map for garbage collection */
- struct rrpc_rev_addr *rev_trans_map;
- spinlock_t rev_lock;
-
- struct rrpc_inflight inflights;
-
- mempool_t *addr_pool;
- mempool_t *page_pool;
- mempool_t *gcb_pool;
- mempool_t *rq_pool;
-
- struct timer_list gc_timer;
- struct workqueue_struct *krqd_wq;
- struct workqueue_struct *kgc_wq;
-};
-
-struct rrpc_block_gc {
- struct rrpc *rrpc;
- struct rrpc_block *rblk;
- struct work_struct ws_gc;
-};
-
-/* Logical to physical mapping */
-struct rrpc_addr {
- u64 addr;
- struct rrpc_block *rblk;
-};
-
-/* Physical to logical mapping */
-struct rrpc_rev_addr {
- u64 addr;
-};
-
-static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- return l;
-}
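
The helper peels fields off the low end of the linear address: sector index first, then page index. With assumed geometry of sec_per_pg == 4 and pgs_per_blk == 512, linear address 1031 decomposes as:

        l.g.sec = 1031 % 4;          /* 3: fourth sector of its page */
        l.g.pg  = (1031 / 4) % 512;  /* 257: page index in the block */
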
-
-static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
-{
- return linear_to_generic_addr(&dev->geo, pba);
-}
-
-static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
-}
-
-static inline sector_t rrpc_get_laddr(struct bio *bio)
-{
- return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
-}
-
-static inline unsigned int rrpc_get_pages(struct bio *bio)
-{
- return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
-}
-
-static inline sector_t rrpc_get_sector(sector_t laddr)
-{
- return laddr * NR_PHY_IN_LOG;
-}
-
-static inline int request_intersects(struct rrpc_inflight_rq *r,
- sector_t laddr_start, sector_t laddr_end)
-{
- return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
-}
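
This is the standard closed-interval overlap test: two ranges intersect iff each one starts no later than the other ends. For an inflight range r spanning [8, 15]:

        request_intersects(r, 12, 20);  /* 1: 20 >= 8 && 12 <= 15 */
        request_intersects(r, 16, 20);  /* 0: 16 > 15, disjoint   */
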
-
-static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages, struct rrpc_inflight_rq *r)
-{
- sector_t laddr_end = laddr + pages - 1;
- struct rrpc_inflight_rq *rtmp;
-
- WARN_ON(irqs_disabled());
-
- spin_lock_irq(&rrpc->inflights.lock);
- list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
- if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
- /* existing, overlapping request, come back later */
- spin_unlock_irq(&rrpc->inflights.lock);
- return 1;
- }
- }
-
- r->l_start = laddr;
- r->l_end = laddr_end;
-
- list_add_tail(&r->list, &rrpc->inflights.reqs);
- spin_unlock_irq(&rrpc->inflights.lock);
- return 0;
-}
-
-static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages,
- struct rrpc_inflight_rq *r)
-{
- BUG_ON((laddr + pages) > rrpc->nr_sects);
-
- return __rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
-{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-
- return &rrqd->inflight_rq;
-}
-
-static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd)
-{
- sector_t laddr = rrpc_get_laddr(bio);
- unsigned int pages = rrpc_get_pages(bio);
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-
- return rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
- struct rrpc_inflight_rq *r)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rrpc->inflights.lock, flags);
- list_del_init(&r->list);
- spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
-}
-
-static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- uint8_t pages = rqd->nr_ppas;
-
- BUG_ON((r->l_start + pages) > rrpc->nr_sects);
-
- rrpc_unlock_laddr(rrpc, r);
-}
-
-#endif /* RRPC_H_ */
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 899ec1f4c833..346e6f5f77be 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1245,10 +1245,10 @@ static ssize_t smu_read(struct file *file, char __user *buf,
return -EBADFD;
}
-static unsigned int smu_fpoll(struct file *file, poll_table *wait)
+static __poll_t smu_fpoll(struct file *file, poll_table *wait)
{
struct smu_private *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c4c2b3b85ebc..e8b29fc532e1 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2157,11 +2157,11 @@ pmu_write(struct file *file, const char __user *buf,
return 0;
}
-static unsigned int
+static __poll_t
pmu_fpoll(struct file *filp, poll_table *wait)
{
struct pmu_private *pp = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 93f3d4d61fa7..f84730d63b1f 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -235,7 +235,7 @@ kfree_err:
return ret;
}
-static unsigned int
+static __poll_t
mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
{
struct mbox_test_device *tdev = filp->private_data;
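
These hunks are part of the tree-wide conversion of poll methods from unsigned int to the bitwise __poll_t type, which lets sparse verify that handlers build their mask from EPOLL* bits. A minimal converted handler looks roughly like this (the device struct and its fields are assumptions for illustration):

        static __poll_t my_poll(struct file *filp, poll_table *wait)
        {
                struct my_dev *dev = filp->private_data;  /* assumed type */
                __poll_t mask = 0;

                poll_wait(filp, &dev->readq, wait);
                if (dev->data_ready)
                        mask |= EPOLLIN | EPOLLRDNORM;
                return mask;
        }
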
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 83b9362be09c..2c8ac3688815 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,6 +269,13 @@ config DM_BIO_PRISON
source "drivers/md/persistent-data/Kconfig"
+config DM_UNSTRIPED
+ tristate "Unstriped target"
+ depends on BLK_DEV_DM
+ ---help---
+ Unstripes I/O so it is issued solely on a single drive in a HW
+ RAID0 or dm-striped target.
+
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f701bb211783..63255f3ebd97 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
+obj-$(CONFIG_DM_UNSTRIPED) += dm-unstripe.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a0cc1bc6d884..6cc6c0f9c3a9 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -525,15 +525,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
*
 * The idea is that if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable: if
+ * they are mixed with dirty sectors of a cached device, such buckets will be
+ * marked as dirty and won't be reclaimed, even though the dirty data of the
+ * cached device has been written back to the backing device.
+ *
+ * And say you've started Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -550,7 +556,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 843877e017e1..5e2d4e80198e 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -320,14 +320,15 @@ struct cached_dev {
*/
atomic_t has_dirty;
- struct bch_ratelimit writeback_rate;
- struct delayed_work writeback_rate_update;
-
/*
- * Internal to the writeback code, so read_dirty() can keep track of
- * where it's at.
+ * Set to zero by things that touch the backing volume-- except
+ * writeback. Incremented by writeback. Used to determine when to
+ * accelerate idle writeback.
*/
- sector_t last_read;
+ atomic_t backing_idle;
+
+ struct bch_ratelimit writeback_rate;
+ struct delayed_work writeback_rate_update;
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
@@ -336,6 +337,14 @@ struct cached_dev {
struct keybuf writeback_keys;
+ /*
+ * Order the write-half of writeback operations strongly in dispatch
+ * order. (Maintain LBA order; don't allow reads completing out of
+ * order to re-order the writes...)
+ */
+ struct closure_waitlist writeback_ordering_wait;
+ atomic_t writeback_sequence_next;
+
/* For tracking sequential IO */
#define RECENT_IO_BITS 7
#define RECENT_IO (1 << RECENT_IO_BITS)
@@ -488,6 +497,7 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
+ unsigned devices_max_used;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
struct closure caching;
@@ -852,7 +862,7 @@ static inline void wake_up_allocators(struct cache_set *c)
/* Forward declarations */
-void bch_count_io_errors(struct cache *, blk_status_t, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81e8dc3dbe5e..bf3a48aa9a9a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -419,7 +419,7 @@ static void do_btree_node_write(struct btree *b)
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
bset_sector_offset(&b->keys, i));
- if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+ if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -432,6 +432,7 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL);
} else {
+ /* No problem for a multipage bvec, since the bio was just allocated */
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
@@ -1678,7 +1679,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
/* don't reclaim buckets to which writeback keys point */
rcu_read_lock();
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
@@ -1803,10 +1804,7 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c)
{
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
- if (IS_ERR(c->gc_thread))
- return PTR_ERR(c->gc_thread);
-
- return 0;
+ return PTR_ERR_OR_ZERO(c->gc_thread);
}
/* Initial partial gc */
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 1841d0359bac..7f12920c14f7 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
#include "closure.h"
@@ -18,10 +19,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
BUG_ON(flags & CLOSURE_GUARD_MASK);
BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
- /* Must deliver precisely one wakeup */
- if (r == 1 && (flags & CLOSURE_SLEEPING))
- wake_up_process(cl->task);
-
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
atomic_set(&cl->remaining,
@@ -100,28 +97,34 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
}
EXPORT_SYMBOL(closure_wait);
-/**
- * closure_sync - sleep until a closure has nothing left to wait on
- *
- * Sleeps until the refcount hits 1 - the thread that's running the closure owns
- * the last refcount.
- */
-void closure_sync(struct closure *cl)
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static void closure_sync_fn(struct closure *cl)
{
- while (1) {
- __closure_start_sleep(cl);
- closure_set_ret_ip(cl);
+ cl->s->done = 1;
+ wake_up_process(cl->s->task);
+}
- if ((atomic_read(&cl->remaining) &
- CLOSURE_REMAINING_MASK) == 1)
- break;
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
schedule();
}
- __closure_end_sleep(cl);
+ __set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(closure_sync);
+EXPORT_SYMBOL(__closure_sync);
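
The syncer replaces the old CLOSURE_SLEEPING flag with the classic lost-wakeup-safe sleep loop: the waiter marks itself TASK_UNINTERRUPTIBLE before testing the condition, and the waker sets ->done before wake_up_process(), so a wakeup racing with the test is never lost. The bare pattern:

        /* waker side */
        s->done = 1;
        wake_up_process(s->task);

        /* waiter side */
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (s->done)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
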
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -168,12 +171,10 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s\n",
+ seq_printf(f, "%s%s\n",
test_bit(WORK_STRUCT_PENDING_BIT,
work_data_bits(&cl->work)) ? "Q" : "",
- r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "");
+ r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index ccfbea6f9f6b..3b9dfc9962ad 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -103,6 +103,7 @@
*/
struct closure;
+struct closure_syncer;
typedef void (closure_fn) (struct closure *);
struct closure_waitlist {
@@ -115,10 +116,6 @@ enum closure_state {
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
*
- * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
- * - indicates that cl->task is valid and closure_put() may wake it up.
- * Only set or cleared by the thread that owns the closure.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -128,22 +125,16 @@ enum closure_state {
* continue_at() and closure_return() clear it for you, if you're doing
* something unusual you can use closure_set_dead() which also helps
* annotate where references are being transferred.
- *
- * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
- * closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 23),
- CLOSURE_DESTRUCTOR = (1 << 23),
- CLOSURE_WAITING = (1 << 25),
- CLOSURE_SLEEPING = (1 << 27),
- CLOSURE_RUNNING = (1 << 29),
- CLOSURE_STACK = (1 << 31),
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
- CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
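These definitions pack the reference count and the state flags into a single atomic word: bits below CLOSURE_BITS_START hold the count and bits above hold the flags, so one atomic operation can drop a reference and observe the flags together. A small C11 sketch of that packing, using the same bit positions but otherwise illustrative names:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative layout: low 26 bits = refcount, high bits = flags,
 * mirroring CLOSURE_BITS_START = 1U << 26 in closure.h. */
#define BITS_START	(1U << 26)
#define REMAINING_MASK	(BITS_START - 1)
#define FLAG_RUNNING	(1U << 30)

int main(void)
{
	atomic_uint remaining = 1 | FLAG_RUNNING;	/* one ref + RUNNING */

	atomic_fetch_add(&remaining, 1);		/* ~closure_get() */

	/* ~closure_put(): drop a ref and read count + flags atomically */
	unsigned old = atomic_fetch_sub(&remaining, 1);
	unsigned count = (old - 1) & REMAINING_MASK;
	unsigned flags = (old - 1) & ~REMAINING_MASK;

	printf("count=%u running=%d\n", count, !!(flags & FLAG_RUNNING));
	return 0;
}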
@@ -152,7 +143,7 @@ struct closure {
union {
struct {
struct workqueue_struct *wq;
- struct task_struct *task;
+ struct closure_syncer *s;
struct llist_node list;
closure_fn *fn;
};
@@ -178,7 +169,19 @@ void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
-void closure_sync(struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+ if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+ __closure_sync(cl);
+}
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -215,24 +218,6 @@ static inline void closure_set_waiting(struct closure *cl, unsigned long f)
#endif
}
-static inline void __closure_end_sleep(struct closure *cl)
-{
- __set_current_state(TASK_RUNNING);
-
- if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
- atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
-}
-
-static inline void __closure_start_sleep(struct closure *cl)
-{
- closure_set_ip(cl);
- cl->task = current;
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
- atomic_add(CLOSURE_SLEEPING, &cl->remaining);
-}
-
static inline void closure_set_stopped(struct closure *cl)
{
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
@@ -241,7 +226,6 @@ static inline void closure_set_stopped(struct closure *cl)
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
{
- BUG_ON(object_is_on_stack(cl));
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
@@ -300,7 +284,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}
/**
@@ -322,6 +306,8 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* This is because after calling continue_at() you no longer have a ref on @cl,
* and whatever @cl owns may be freed out from under you - a running closure fn
* has a ref on its own closure which continue_at() drops.
+ *
+ * Note that you are expected to return immediately after using this macro.
*/
#define continue_at(_cl, _fn, _wq) \
do { \
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index c7a02c4900da..af89408befe8 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -116,7 +116,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
return;
check->bi_opf = REQ_OP_READ;
- if (bio_alloc_pages(check, GFP_NOIO))
+ if (bch_bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
@@ -251,8 +251,7 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
- int ret = 0;
-
debug = debugfs_create_dir("bcache", NULL);
- return ret;
+
+ return IS_ERR_OR_NULL(debug);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index fac97ec2d0e2..a783c5a41ff1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -51,7 +51,10 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
-void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
+void bch_count_io_errors(struct cache *ca,
+ blk_status_t error,
+ int is_read,
+ const char *m)
{
/*
* The halflife of an error is:
@@ -94,8 +97,9 @@ void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s, recovering",
- bdevname(ca->bdev, buf), m);
+ pr_err("%s: IO error on %s%s",
+ bdevname(ca->bdev, buf), m,
+ is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
"%s: too many IO errors %s",
@@ -108,6 +112,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct cache *ca = PTR_CACHE(c, &b->key, 0);
+ int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
@@ -129,7 +134,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
atomic_inc(&c->congested);
}
- bch_count_io_errors(ca, error, m);
+ bch_count_io_errors(ca, error, is_read, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
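bch_count_io_errors() keeps a decaying error count so that old errors age out instead of accumulating forever, and the new is_read argument only changes the message: read errors are recoverable from the backing device, write errors are not. A standalone sketch of decay-style error accounting, with assumed shift constants (the real values live in bcache.h):

#include <stdio.h>

/* Each new error bumps the count; on every event the count also
 * decays by count >> DECAY_SHIFT, approximating a halflife. */
#define ERROR_SHIFT 20		/* fixed-point scale (assumed value) */
#define DECAY_SHIFT 5		/* larger shift = slower decay (assumed) */

static unsigned count_error(unsigned errors, int is_error)
{
	errors -= errors >> DECAY_SHIFT;	/* age out old errors */
	if (is_error)
		errors += 1 << ERROR_SHIFT;	/* record the new one */
	return errors;
}

int main(void)
{
	unsigned errors = 0;
	int i;

	for (i = 0; i < 10; i++)
		errors = count_error(errors, 1);	/* burst of errors */
	for (i = 0; i < 100; i++)
		errors = count_error(errors, 0);	/* quiet period */

	printf("errors now %u (threshold units: %u)\n",
	       errors, errors >> ERROR_SHIFT);
	return 0;
}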
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index d50c1c97da68..a24c3a95b2c0 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -162,7 +162,7 @@ static void read_moving(struct cache_set *c)
bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = read_moving_endio;
- if (bio_alloc_pages(bio, GFP_KERNEL))
+ if (bch_bio_alloc_pages(bio, GFP_KERNEL))
goto err;
trace_bcache_gc_copy(&w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 643c3021624f..1a46b41dac70 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -576,6 +576,7 @@ static void cache_lookup(struct closure *cl)
{
struct search *s = container_of(cl, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc;
int ret;
bch_btree_op_init(&s->op, -1);
@@ -588,6 +589,27 @@ static void cache_lookup(struct closure *cl)
return;
}
+ /*
+ * We might encounter an error while searching the btree; if that
+ * happens, ret is negative. In this scenario we must not recover
+ * data from the backing device (when the cache device is dirty),
+ * because we don't know whether the bkeys the read request covers
+ * are all clean.
+ *
+ * Also note that when this happens, s->iop.status still holds its
+ * initial value from before s->bio.bio was submitted.
+ */
+ if (ret < 0) {
+ BUG_ON(ret == -EINTR);
+ if (s->d && s->d->c &&
+ !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
+ dc = container_of(s->d, struct cached_dev, disk);
+ if (dc && atomic_read(&dc->has_dirty))
+ s->recoverable = false;
+ }
+ if (!s->iop.status)
+ s->iop.status = BLK_STS_IOERR;
+ }
+
closure_return(cl);
}
@@ -611,8 +633,8 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- struct request_queue *q = s->orig_bio->bi_disk->queue;
- generic_end_io_acct(q, bio_data_dir(s->orig_bio),
+ generic_end_io_acct(s->d->disk->queue,
+ bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -841,7 +863,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
cache_bio->bi_private = &s->cl;
bch_bio_map(cache_bio, NULL);
- if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+ if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
if (reada)
@@ -974,6 +996,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
+ atomic_set(&dc->backing_idle, 0);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b4d28928dec5..133b81225ea9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -211,7 +211,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
- struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+ struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
@@ -274,7 +274,9 @@ static void write_super_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- bch_count_io_errors(ca, bio->bi_status, "writing superblock");
+ /* is_read = 0 */
+ bch_count_io_errors(ca, bio->bi_status, 0,
+ "writing superblock");
closure_put(&ca->set->sb_write);
}
@@ -721,6 +723,9 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
d->c = c;
c->devices[id] = d;
+ if (id >= c->devices_max_used)
+ c->devices_max_used = id + 1;
+
closure_get(&c->caching);
}
@@ -906,6 +911,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
@@ -1166,7 +1177,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev->bd_holder = dc;
bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (cached_dev_init(dc, sb->block_size << 9))
@@ -1261,7 +1272,7 @@ static int flash_devs_run(struct cache_set *c)
struct uuid_entry *u;
for (u = c->uuids;
- u < c->uuids + c->nr_uuids && !ret;
+ u < c->uuids + c->devices_max_used && !ret;
u++)
if (UUID_FLASH_ONLY(u))
ret = flash_dev_run(c, u);
@@ -1427,7 +1438,7 @@ static void __cache_set_unregister(struct closure *cl)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++)
+ for (i = 0; i < c->devices_max_used; i++)
if (c->devices[i]) {
if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
@@ -1490,7 +1501,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
-
+ c->devices_max_used = 0;
c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
@@ -1810,7 +1821,7 @@ void bch_cache_release(struct kobject *kobj)
free_fifo(&ca->free[i]);
if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+ put_page(bio_first_page_all(&ca->sb_bio));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1864,7 +1875,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev->bd_holder = ca;
bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
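devices_max_used, introduced above, is a high-water mark: it only grows when a device attaches to a higher slot, so scans that used to walk all nr_uuids entries can stop at the highest slot ever used. A trivial sketch of the pattern with illustrative names:

#include <stdio.h>

#define NR_SLOTS 256

static void *slots[NR_SLOTS];
static unsigned max_used;	/* high-water mark, only ever grows */

static void attach(unsigned id, void *dev)
{
	slots[id] = dev;
	if (id >= max_used)
		max_used = id + 1;
}

int main(void)
{
	int x, y;
	unsigned i, visited = 0;

	attach(3, &x);
	attach(7, &y);

	for (i = 0; i < max_used; i++)	/* scan 8 slots, not 256 */
		if (slots[i])
			visited++;

	printf("visited=%u scanned=%u\n", visited, max_used);
	return 0;
}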
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index e548b8b51322..a23cd6a14b74 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -249,6 +249,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
: 0;
}
+/*
+ * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
+ * the preferred way is bio_add_page(). But bch_bio_map() assumes that
+ * the bvec table is empty, so it is still safe to access .bi_vcnt and
+ * .bi_io_vec this way, even once multipage bvecs are supported.
+ */
void bch_bio_map(struct bio *bio, void *base)
{
size_t size = bio->bi_iter.bi_size;
@@ -276,6 +283,33 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}
+/**
+ * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+ int i;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ bv->bv_page = alloc_page(gfp_mask);
+ if (!bv->bv_page) {
+ while (--bv >= bio->bi_io_vec)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
/*
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
* use permitted, subject to terms of PostgreSQL license; see.)
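bch_bio_alloc_pages() above uses the classic all-or-nothing allocation shape: walk forward allocating one page per bvec, and on the first failure walk backward freeing everything already obtained. A self-contained sketch of the same unwind pattern with plain malloc (names illustrative):

#include <stdlib.h>
#include <stdio.h>

/* Allocate n buffers; on failure free the ones already allocated and
 * return -1, so the caller never sees a half-filled array. */
static int alloc_all(void **bufs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i]) {
			while (--i >= 0)	/* unwind backwards */
				free(bufs[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *bufs[4];
	int i;

	if (alloc_all(bufs, 4, 4096) == 0) {
		puts("all pages allocated");
		for (i = 0; i < 4; i++)
			free(bufs[i]);
	}
	return 0;
}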
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ed5e8a412eb8..4df4c5c1cab2 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -558,6 +558,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
}
void bch_bio_map(struct bio *bio, void *base);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
static inline sector_t bdev_sectors(struct block_device *bdev)
{
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 56a37884ca8b..51306a19ab03 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -18,17 +18,39 @@
#include <trace/events/bcache.h>
/* Rate limiting */
-
-static void __update_writeback_rate(struct cached_dev *dc)
+static uint64_t __calc_target_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
+
+ /*
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
bcache_flash_devs_sectors_dirty(c);
+
+ /*
+ * Unfortunately there is no control of global dirty data. If the
+ * user states that they want 10% dirty data in the cache, and has,
+ * e.g., 5 backing volumes of equal size, we try to ensure that each
+ * backing volume uses about 2% of the cache for dirty data.
+ */
+ uint32_t bdev_share =
+ div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ c->cached_dev_sectors);
+
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
- int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
- c->cached_dev_sectors);
+ /* Ensure each backing dev gets at least one dirty share */
+ if (bdev_share < 1)
+ bdev_share = 1;
+
+ return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
+}
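__calc_target_rate() works in fixed point: the device's share of the cache is scaled up by WRITEBACK_SHARE_SHIFT before dividing, so small shares survive integer division, then scaled back down after the multiply. A standalone sketch of the arithmetic with assumed device sizes:

#include <stdio.h>
#include <stdint.h>

#define SHARE_SHIFT 14		/* shares expressed in 16384ths */

int main(void)
{
	uint64_t bdev_sectors = 1ULL << 21;	/* 1 GiB backing dev */
	uint64_t total_sectors = 1ULL << 26;	/* 32 GiB of backing */
	uint64_t dirty_target = 1ULL << 20;	/* cache-wide target */

	/* Scale before dividing so a 1/32 share survives as 512/16384 */
	uint32_t share = (bdev_sectors << SHARE_SHIFT) / total_sectors;
	if (share < 1)
		share = 1;			/* everyone gets some */

	printf("per-device target = %llu sectors\n",
	       (unsigned long long)((dirty_target * share) >> SHARE_SHIFT));
	return 0;
}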
+
+static void __update_writeback_rate(struct cached_dev *dc)
+{
/*
* PI controller:
* Figures out the amount that should be written per second.
@@ -49,6 +71,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
* This acts as a slow, long-term average that is not subject to
* variations in usage like the p term.
*/
+ int64_t target = __calc_target_rate(dc);
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t error = dirty - target;
int64_t proportional_scaled =
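The rate computation here is a discrete PI controller: a proportional term reacting to the current excess of dirty data over target, plus an integral term accumulating long-term error. A minimal sketch of such a controller with made-up gain shifts (the kernel's actual terms, scaling and clamping differ):

#include <stdio.h>
#include <stdint.h>

#define P_SHIFT 6		/* proportional gain = 1/64 (assumed) */
#define I_SHIFT 10		/* integral gain = 1/1024 (assumed) */

int main(void)
{
	int64_t target = 100000, dirty = 160000;
	int64_t integral = 0, rate = 0;
	int step;

	for (step = 0; step < 5; step++) {
		int64_t error = dirty - target;

		integral += error;	/* slow, long-term average */
		rate = (error >> P_SHIFT) + (integral >> I_SHIFT);
		if (rate < 0)
			rate = 0;

		dirty -= rate;		/* writeback drains dirty data */
		printf("step %d: rate=%lld dirty=%lld\n",
		       step, (long long)rate, (long long)dirty);
	}
	return 0;
}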
@@ -116,6 +139,7 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
struct dirty_io {
struct closure cl;
struct cached_dev *dc;
+ uint16_t sequence;
struct bio bio;
};
@@ -194,6 +218,27 @@ static void write_dirty(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+
+ uint16_t next_sequence;
+
+ if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
+ /* Not our turn to write; wait for a write to complete */
+ closure_wait(&dc->writeback_ordering_wait, cl);
+
+ if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
+ /*
+ * Edge case: our turn arrived in indeterminate
+ * order relative to when we were added to the
+ * wait list, so wake the list ourselves.
+ */
+ closure_wake_up(&dc->writeback_ordering_wait);
+ }
+
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ return;
+ }
+
+ next_sequence = io->sequence + 1;
/*
* IO errors are signalled using the dirty bit on the key.
@@ -211,6 +256,9 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
}
+ atomic_set(&dc->writeback_sequence_next, next_sequence);
+ closure_wake_up(&dc->writeback_ordering_wait);
+
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
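write_dirty() now enforces in-order submission with a ticket scheme: each dirty_io carries a sequence number, and an I/O whose number doesn't match writeback_sequence_next parks on a waitlist until the previous writer advances the counter. A compact pthread sketch of ticket ordering (names illustrative; the kernel uses closures, not condvars):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static unsigned next_seq;	/* ~dc->writeback_sequence_next */

static void ordered_write(unsigned seq)
{
	pthread_mutex_lock(&lock);
	while (next_seq != seq)		/* not our turn: wait */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("write %u issued\n", seq);	/* the ordered section */

	pthread_mutex_lock(&lock);
	next_seq = seq + 1;		/* hand the turn onward */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	ordered_write((unsigned)(unsigned long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	unsigned long i;

	for (i = 0; i < 3; i++)		/* start out of order on purpose */
		pthread_create(&t[2 - i], NULL, worker, (void *)(2 - i));
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}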
@@ -219,8 +267,10 @@ static void read_dirty_endio(struct bio *bio)
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
+ /* is_read = 1 */
bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
- bio->bi_status, "reading dirty data from cache");
+ bio->bi_status, 1,
+ "reading dirty data from cache");
dirty_endio(bio);
}
@@ -237,10 +287,15 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc)
{
unsigned delay = 0;
- struct keybuf_key *w;
+ struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
+ size_t size;
+ int nk, i;
struct dirty_io *io;
struct closure cl;
+ uint16_t sequence = 0;
+ BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
+ atomic_set(&dc->writeback_sequence_next, sequence);
closure_init_stack(&cl);
/*
@@ -248,45 +303,109 @@ static void read_dirty(struct cached_dev *dc)
* mempools.
*/
- while (!kthread_should_stop()) {
-
- w = bch_keybuf_next(&dc->writeback_keys);
- if (!w)
- break;
-
- BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
-
- if (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50)
- while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
-
- dc->last_read = KEY_OFFSET(&w->key);
-
- io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
- * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
- GFP_KERNEL);
- if (!io)
- goto err;
-
- w->private = io;
- io->dc = dc;
-
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
- io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
- io->bio.bi_end_io = read_dirty_endio;
-
- if (bio_alloc_pages(&io->bio, GFP_KERNEL))
- goto err_free;
+ next = bch_keybuf_next(&dc->writeback_keys);
+
+ while (!kthread_should_stop() && next) {
+ size = 0;
+ nk = 0;
+
+ do {
+ BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
+
+ /*
+ * Don't combine too many operations, even if they
+ * are all small.
+ */
+ if (nk >= MAX_WRITEBACKS_IN_PASS)
+ break;
+
+ /*
+ * If the current operation is very large, don't
+ * further combine operations.
+ */
+ if (size >= MAX_WRITESIZE_IN_PASS)
+ break;
+
+ /*
+ * Operations are only eligible to be combined
+ * if they are contiguous.
+ *
+ * TODO: add a heuristic willing to fire a
+ * certain amount of non-contiguous IO per pass,
+ * so that we can benefit from backing device
+ * command queueing.
+ */
+ if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
+ &START_KEY(&next->key)))
+ break;
+
+ size += KEY_SIZE(&next->key);
+ keys[nk++] = next;
+ } while ((next = bch_keybuf_next(&dc->writeback_keys)));
+
+ /* Now we have gathered a set of 1..5 keys to write back. */
+ for (i = 0; i < nk; i++) {
+ w = keys[i];
+
+ io = kzalloc(sizeof(struct dirty_io) +
+ sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->dc = dc;
+ io->sequence = sequence++;
+
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
+ bio_set_dev(&io->bio,
+ PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
+ io->bio.bi_end_io = read_dirty_endio;
+
+ if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ goto err_free;
+
+ trace_bcache_writeback(&w->key);
+
+ down(&dc->in_flight);
+
+ /*
+ * We've acquired a semaphore slot for the maximum
+ * number of simultaneous writebacks; from here
+ * everything happens asynchronously.
+ */
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ }
- trace_bcache_writeback(&w->key);
+ delay = writeback_delay(dc, size);
- down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ /*
+ * If the control system would wait for at least half a
+ * second, and there have been no requests hitting the
+ * backing disk for a while: use an alternate mode where
+ * we have at most one contiguous set of writebacks in
+ * flight at a time. If someone wants to do IO it will
+ * be quick, as it will only have to contend with one
+ * operation in flight, and we'll be round-tripping data
+ * to the backing disk as quickly as it can accept it.
+ */
+ if (delay >= HZ / 2) {
+ /* 3 means at least 1.5 seconds, up to 7.5 if we
+ * have slowed way down.
+ */
+ if (atomic_inc_return(&dc->backing_idle) >= 3) {
+ /* Wait for current I/Os to finish */
+ closure_sync(&cl);
+ /* And immediately launch a new set. */
+ delay = 0;
+ }
+ }
- delay = writeback_delay(dc, KEY_SIZE(&w->key));
+ while (!kthread_should_stop() && delay) {
+ schedule_timeout_interruptible(delay);
+ delay = writeback_delay(dc, 0);
+ }
}
if (0) {
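The reworked read_dirty() gathers up to MAX_WRITEBACKS_IN_PASS keys per pass, but only while they remain contiguous and stay under MAX_WRITESIZE_IN_PASS total sectors, so writebacks can merge into larger backing-device I/Os. A simplified sketch of that gathering loop over a plain array (constants as in the patch, everything else illustrative):

#include <stdio.h>

#define MAX_IN_PASS	5
#define MAX_SIZE_PASS	5000	/* in 512b sectors */

struct key { unsigned start, size; };	/* toy stand-in for a bkey */

int main(void)
{
	struct key keys[] = {
		{ 0, 8 }, { 8, 8 }, { 16, 8 },	/* contiguous run */
		{ 100, 8 },			/* gap: new batch */
	};
	int n = sizeof(keys) / sizeof(keys[0]);
	int i = 0;

	while (i < n) {
		unsigned size = 0;
		int nk = 0;

		/* gather while contiguous and under the size/count caps */
		do {
			size += keys[i].size;
			nk++;
			i++;
		} while (i < n && nk < MAX_IN_PASS &&
			 size < MAX_SIZE_PASS &&
			 keys[i].start == keys[i - 1].start + keys[i - 1].size);

		printf("batch of %d keys, %u sectors\n", nk, size);
	}
	return 0;
}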
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index a9e3ffb4b03c..66f1c527fa24 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -5,6 +5,16 @@
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70
+#define MAX_WRITEBACKS_IN_PASS 5
+#define MAX_WRITESIZE_IN_PASS 5000 /* *512b */
+
+/*
+ * A shift of 14 (shares in 16384ths) is chosen so that each backing
+ * device gets a reasonable fraction of the share, and the arithmetic
+ * doesn't overflow until individual backing devices reach a petabyte.
+ */
+#define WRITEBACK_SHARE_SHIFT 14
+
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
uint64_t i, ret = 0;
@@ -21,7 +31,7 @@ static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c546b567f3b5..414c9af54ded 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -662,7 +662,7 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
- if (rw != WRITE) {
+ if (rw != REQ_OP_WRITE) {
n_sectors = 1 << b->c->sectors_per_block_bits;
offset = 0;
} else {
@@ -740,7 +740,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
b->write_end = b->dirty_end;
if (!write_list)
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
else
list_add_tail(&b->write_list, write_list);
}
@@ -753,7 +753,7 @@ static void __flush_write_list(struct list_head *write_list)
struct dm_buffer *b =
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
cond_resched();
}
blk_finish_plug(&plug);
@@ -1123,7 +1123,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
return NULL;
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1193,7 +1193,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
dm_bufio_unlock(c);
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
dm_bufio_release(b);
cond_resched();
@@ -1454,7 +1454,7 @@ retry:
old_block = b->block;
__unlink_buffer(b);
__link_buffer(b, new_block, b->list_mode);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
wait_on_bit_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
__unlink_buffer(b);
@@ -1716,7 +1716,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE_NAME(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
@@ -1727,7 +1727,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
}
@@ -1738,27 +1738,28 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!b) {
r = -ENOMEM;
- goto bad_buffer;
+ goto bad;
}
__free_buffer_wake(b);
}
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
+ c->shrinker.seeks = 1;
+ c->shrinker.batch = 0;
+ r = register_shrinker(&c->shrinker);
+ if (r)
+ goto bad;
+
mutex_lock(&dm_bufio_clients_lock);
dm_bufio_client_count++;
list_add(&c->client_list, &dm_bufio_all_clients);
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.count_objects = dm_bufio_shrink_count;
- c->shrinker.scan_objects = dm_bufio_shrink_scan;
- c->shrinker.seeks = 1;
- c->shrinker.batch = 0;
- register_shrinker(&c->shrinker);
-
return c;
-bad_buffer:
-bad_cache:
+bad:
while (!list_empty(&c->reserved_buffers)) {
struct dm_buffer *b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1767,6 +1768,7 @@ bad_cache:
}
dm_io_client_destroy(c->dm_io);
bad_dm_io:
+ mutex_destroy(&c->lock);
kfree(c);
bad_client:
return ERR_PTR(r);
@@ -1811,6 +1813,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
BUG_ON(c->n_buffers[i]);
dm_io_client_destroy(c->dm_io);
+ mutex_destroy(&c->lock);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
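register_shrinker() can fail because it allocates per-node counters, so the constructor now checks its return value and funnels every failure through a single bad: label. A generic sketch of that goto-unwind shape (the functions here are stand-ins, not the dm-bufio API):

#include <stdlib.h>
#include <stdio.h>

static int register_thing(void **out)	/* stand-in, may fail */
{
	*out = malloc(16);
	return *out ? 0 : -12;		/* -ENOMEM */
}

static int create_client(void **pa, void **pb)
{
	int r;

	*pa = *pb = NULL;
	r = register_thing(pa);
	if (r)
		goto bad;
	r = register_thing(pb);
	if (r)
		goto bad;		/* one label unwinds everything */
	return 0;
bad:
	free(*pb);			/* free(NULL) is a no-op */
	free(*pa);
	return r;
}

int main(void)
{
	void *a, *b;

	if (create_client(&a, &b) == 0) {
		puts("client created");
		free(b);
		free(a);
	}
	return 0;
}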
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 6a14f945783c..3222e21cbbf8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -91,8 +91,7 @@ struct mapped_device {
/*
* io objects are allocated from here.
*/
- mempool_t *io_pool;
-
+ struct bio_set *io_bs;
struct bio_set *bs;
/*
@@ -130,8 +129,6 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 554d60394c06..8168f737590e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1446,7 +1446,6 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
- bv->bv_page = NULL;
}
}
@@ -2194,6 +2193,8 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher_auth);
kzfree(cc->authenc_key);
+ mutex_destroy(&cc->bio_alloc_lock);
+
/* Must zero key material before freeing */
kzfree(cc);
}
@@ -2703,8 +2704,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
goto bad;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 288386bfbfb5..1783d80c9cad 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -229,6 +229,8 @@ static void delay_dtr(struct dm_target *ti)
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
+ mutex_destroy(&dc->timer_lock);
+
kfree(dc);
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b82cb1ab1eaa..1b907b15f5c3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -70,6 +70,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
arg_name = dm_shift_arg(as);
argc--;
+ if (!arg_name) {
+ ti->error = "Insufficient feature arguments";
+ return -EINVAL;
+ }
+
/*
* drop_writes
*/
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b4357ed4d541..a8d914d5abbe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -58,8 +58,7 @@ struct dm_io_client *dm_io_client_create(void)
if (!client->pool)
goto bad;
- client->bios = bioset_create(min_ios, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ client->bios = bioset_create(min_ios, 0, BIOSET_NEED_BVECS);
if (!client->bios)
goto bad;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e52676fa9832..3f6791afd3e4 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1929,10 +1929,10 @@ static int dm_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned dm_poll(struct file *filp, poll_table *wait)
+static __poll_t dm_poll(struct file *filp, poll_table *wait)
{
struct dm_file *priv = filp->private_data;
- unsigned mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dm_global_eventq, wait);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index eb45cc3df31d..e6e7c686646d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -477,8 +477,10 @@ static int run_complete_job(struct kcopyd_job *job)
* If this is the master job, the sub jobs have already
* completed so we can free everything.
*/
- if (job->master_job == job)
+ if (job->master_job == job) {
+ mutex_destroy(&job->lock);
mempool_free(job, kc->job_pool);
+ }
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -750,6 +752,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
* followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
+ mutex_init(&job->lock);
/*
* set up for the read.
@@ -811,7 +814,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
else {
- mutex_init(&job->lock);
job->progress = 0;
split_job(job);
}
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 189badbeddaf..3362d866793b 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -594,7 +594,7 @@ static int log_mark(struct log_writes_c *lc, char *data)
return -ENOMEM;
}
- block->data = kstrndup(data, maxsize, GFP_KERNEL);
+ block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
if (!block->data) {
DMERR("Error copying mark data");
kfree(block);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f7810cc869ac..7d3e572072f5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,36 +64,30 @@ struct priority_group {
/* Multipath context */
struct multipath {
- struct list_head list;
- struct dm_target *ti;
-
- const char *hw_handler_name;
- char *hw_handler_params;
+ unsigned long flags; /* Multipath state flags */
spinlock_t lock;
-
- unsigned nr_priority_groups;
- struct list_head priority_groups;
-
- wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ enum dm_queue_mode queue_mode;
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- unsigned long flags; /* Multipath state flags */
+ atomic_t nr_valid_paths; /* Total number of usable paths */
+ unsigned nr_priority_groups;
+ struct list_head priority_groups;
+ const char *hw_handler_name;
+ char *hw_handler_params;
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
-
- atomic_t nr_valid_paths; /* Total number of usable paths */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
- enum dm_queue_mode queue_mode;
-
struct mutex work_mutex;
struct work_struct trigger_event;
+ struct dm_target *ti;
struct work_struct process_queued_bios;
struct bio_list queued_bios;
@@ -135,10 +129,10 @@ static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
- if (pgpath) {
- pgpath->is_active = true;
- INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
- }
+ if (!pgpath)
+ return NULL;
+
+ pgpath->is_active = true;
return pgpath;
}
@@ -193,13 +187,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- set_bit(MPATHF_QUEUE_IO, &m->flags);
atomic_set(&m->nr_valid_paths, 0);
- atomic_set(&m->pg_init_in_progress, 0);
- atomic_set(&m->pg_init_count, 0);
- m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
- init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
m->queue_mode = DM_TYPE_NONE;
@@ -221,13 +210,26 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
- } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
- /*
- * bio-based doesn't support any direct scsi_dh management;
- * it just discovers if a scsi_dh is attached.
- */
- set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+
+ if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ /*
+ * bio-based doesn't support any direct scsi_dh management;
+ * it just discovers if a scsi_dh is attached.
+ */
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+ }
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
+ atomic_set(&m->pg_init_in_progress, 0);
+ atomic_set(&m->pg_init_count, 0);
+ m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+ init_waitqueue_head(&m->pg_init_wait);
}
dm_table_set_type(ti->table, m->queue_mode);
@@ -246,6 +248,7 @@ static void free_multipath(struct multipath *m)
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
+ mutex_destroy(&m->work_mutex);
kfree(m);
}
@@ -264,29 +267,23 @@ static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
return dm_per_bio_data(bio, multipath_per_bio_data_size());
}
-static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
+static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
- struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
void *bio_details = mpio + 1;
-
return bio_details;
}
-static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
- struct dm_bio_details **bio_details_p)
+static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
- struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
+ struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
- memset(mpio, 0, sizeof(*mpio));
- memset(bio_details, 0, sizeof(*bio_details));
- dm_bio_record(bio_details, bio);
+ mpio->nr_bytes = bio->bi_iter.bi_size;
+ mpio->pgpath = NULL;
+ *mpio_p = mpio;
- if (mpio_p)
- *mpio_p = mpio;
- if (bio_details_p)
- *bio_details_p = bio_details;
+ dm_bio_record(bio_details, bio);
}
/*-----------------------------------------------
@@ -340,6 +337,9 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
m->current_pg = pg;
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ return;
+
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -385,7 +385,8 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
- clear_bit(MPATHF_QUEUE_IO, &m->flags);
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
goto failed;
}
@@ -516,12 +517,10 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_KILL;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
- if (pg_init_all_paths(m))
- return DM_MAPIO_DELAY_REQUEUE;
- return DM_MAPIO_REQUEUE;
+ pg_init_all_paths(m);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- memset(mpio, 0, sizeof(*mpio));
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
@@ -530,12 +529,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
- bool queue_dying = blk_queue_dying(q);
- if (queue_dying) {
+ if (blk_queue_dying(q)) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- return DM_MAPIO_DELAY_REQUEUE;
+
+ /*
+ * blk-mq's SCHED_RESTART can handle this requeue, so we don't
+ * need DELAY_REQUEUE here. More importantly, we have to return
+ * DM_MAPIO_REQUEUE so that blk-mq gets the queue-busy feedback
+ * (via BLK_STS_RESOURCE); otherwise I/O merging can suffer.
+ */
+ if (q->mq_ops)
+ return DM_MAPIO_REQUEUE;
+ else
+ return DM_MAPIO_DELAY_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
@@ -557,9 +567,9 @@ static void multipath_release_clone(struct request *clone)
/*
* Map cloned bios (bio-based multipath)
*/
-static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
+
+static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
- size_t nr_bytes = bio->bi_iter.bi_size;
struct pgpath *pgpath;
unsigned long flags;
bool queue_io;
@@ -568,7 +578,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
- pgpath = choose_pgpath(m, nr_bytes);
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
@@ -576,14 +586,62 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, bio);
spin_unlock_irqrestore(&m->lock, flags);
+
/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
else if (!queue_io)
queue_work(kmultipathd, &m->process_queued_bios);
- return DM_MAPIO_SUBMITTED;
+
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return pgpath;
+}
+
+static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+{
+ struct pgpath *pgpath;
+ unsigned long flags;
+
+ /* Do we need to select a new pgpath? */
+ /*
+ * FIXME: we currently only switch paths when there is no current
+ * path (e.g. due to failure), which negates the point of using a
+ * path selector.
+ */
+ pgpath = READ_ONCE(m->current_pgpath);
+ if (!pgpath)
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+
+ if (!pgpath) {
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ /* Queue for the daemon to resubmit */
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, bio);
+ spin_unlock_irqrestore(&m->lock, flags);
+ queue_work(kmultipathd, &m->process_queued_bios);
+
+ return ERR_PTR(-EAGAIN);
+ }
+ return NULL;
}
+ return pgpath;
+}
+
+static int __multipath_map_bio(struct multipath *m, struct bio *bio,
+ struct dm_mpath_io *mpio)
+{
+ struct pgpath *pgpath;
+
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ pgpath = __map_bio_nvme(m, bio);
+ else
+ pgpath = __map_bio(m, bio);
+
+ if (IS_ERR(pgpath))
+ return DM_MAPIO_SUBMITTED;
+
if (!pgpath) {
if (must_push_back_bio(m))
return DM_MAPIO_REQUEUE;
@@ -592,7 +650,6 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
}
mpio->pgpath = pgpath;
- mpio->nr_bytes = nr_bytes;
bio->bi_status = 0;
bio_set_dev(bio, pgpath->path.dev->bdev);
@@ -601,7 +658,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
&pgpath->path,
- nr_bytes);
+ mpio->nr_bytes);
return DM_MAPIO_REMAPPED;
}
@@ -610,8 +667,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = NULL;
- multipath_init_per_bio_data(bio, &mpio, NULL);
-
+ multipath_init_per_bio_data(bio, &mpio);
return __multipath_map_bio(m, bio, mpio);
}
@@ -619,7 +675,8 @@ static void process_queued_io_list(struct multipath *m)
{
if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
- else if (m->queue_mode == DM_TYPE_BIO_BASED)
+ else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
}
@@ -649,7 +706,9 @@ static void process_queued_bios(struct work_struct *work)
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
- r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
+ r = __multipath_map_bio(m, bio, mpio);
switch (r) {
case DM_MAPIO_KILL:
bio->bi_status = BLK_STS_IOERR;
@@ -752,34 +811,11 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
return 0;
}
-static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
- struct dm_target *ti)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
{
- int r;
- struct pgpath *p;
- struct multipath *m = ti->private;
- struct request_queue *q = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
const char *attached_handler_name;
-
- /* we need at least a path arg */
- if (as->argc < 1) {
- ti->error = "no device given";
- return ERR_PTR(-EINVAL);
- }
-
- p = alloc_pgpath();
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
- &p->path.dev);
- if (r) {
- ti->error = "error getting device";
- goto bad;
- }
-
- if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
- q = bdev_get_queue(p->path.dev->bdev);
+ int r;
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
@@ -811,26 +847,59 @@ retain:
char b[BDEVNAME_SIZE];
printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
- bdevname(p->path.dev->bdev, b));
+ bdevname(bdev, b));
goto retain;
}
if (r < 0) {
- ti->error = "error attaching hardware handler";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "error attaching hardware handler";
+ return r;
}
if (m->hw_handler_params) {
r = scsi_dh_set_params(q, m->hw_handler_params);
if (r < 0) {
- ti->error = "unable to set hardware "
- "handler parameters";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "unable to set hardware handler parameters";
+ return r;
}
}
}
+ return 0;
+}
+
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
+ struct dm_target *ti)
+{
+ int r;
+ struct pgpath *p;
+ struct multipath *m = ti->private;
+
+ /* we need at least a path arg */
+ if (as->argc < 1) {
+ ti->error = "no device given";
+ return ERR_PTR(-EINVAL);
+ }
+
+ p = alloc_pgpath();
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+ &p->path.dev);
+ if (r) {
+ ti->error = "error getting device";
+ goto bad;
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
+ r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+ if (r) {
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ }
+
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
@@ -838,7 +907,6 @@ retain:
}
return p;
-
bad:
free_pgpath(p);
return ERR_PTR(r);
@@ -933,7 +1001,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
- if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
dm_consume_args(as, hw_argc);
DMERR("bio-based multipath doesn't allow hardware handler args");
return 0;
@@ -1022,6 +1091,8 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
+ else if (!strcasecmp(queue_mode_name, "nvme"))
+ m->queue_mode = DM_TYPE_NVME_BIO_BASED;
else if (!strcasecmp(queue_mode_name, "rq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1122,7 +1193,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
- if (m->queue_mode == DM_TYPE_BIO_BASED)
+ if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
else
ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1151,16 +1222,19 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ if (m->hw_handler_name) {
+ set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+ }
- flush_workqueue(kmpath_handlerd);
- multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
-
- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
}
static void multipath_dtr(struct dm_target *ti)
@@ -1475,21 +1549,6 @@ static void activate_path_work(struct work_struct *work)
activate_or_offline_path(pgpath);
}
-static int noretry_error(blk_status_t error)
-{
- switch (error) {
- case BLK_STS_NOTSUPP:
- case BLK_STS_NOSPC:
- case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
- case BLK_STS_MEDIUM:
- return 1;
- }
-
- /* Anything else could be a path failure, so should be retried */
- return 0;
-}
-
static int multipath_end_io(struct dm_target *ti, struct request *clone,
blk_status_t error, union map_info *map_context)
{
@@ -1508,10 +1567,13 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
* request into dm core, which will remake a clone request and
* clone bios for it and resubmit it later.
*/
- if (error && !noretry_error(error)) {
+ if (error && blk_path_error(error)) {
struct multipath *m = ti->private;
- r = DM_ENDIO_REQUEUE;
+ if (error == BLK_STS_RESOURCE)
+ r = DM_ENDIO_DELAY_REQUEUE;
+ else
+ r = DM_ENDIO_REQUEUE;
if (pgpath)
fail_path(pgpath);
@@ -1536,7 +1598,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
}
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
- blk_status_t *error)
+ blk_status_t *error)
{
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
@@ -1544,7 +1606,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
unsigned long flags;
int r = DM_ENDIO_DONE;
- if (!*error || noretry_error(*error))
+ if (!*error || !blk_path_error(*error))
goto done;
if (pgpath)
@@ -1561,9 +1623,6 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
goto done;
}
- /* Queue for the daemon to resubmit */
- dm_bio_restore(get_bio_details_from_bio(clone), clone);
-
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, clone);
spin_unlock_irqrestore(&m->lock, flags);
@@ -1671,6 +1730,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
+ case DM_TYPE_NVME_BIO_BASED:
+ DMEMIT("queue_mode nvme ");
+ break;
case DM_TYPE_MQ_REQUEST_BASED:
DMEMIT("queue_mode mq ");
break;
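blk_path_error() centralizes the question the removed noretry_error() used to answer: could retrying on another path help? Errors such as BLK_STS_NOTSUPP or BLK_STS_MEDIUM are properties of the target, not the path, so retrying is pointless. A sketch of that classification shape, with a local enum standing in for blk_status_t:

#include <stdbool.h>
#include <stdio.h>

enum status { STS_OK, STS_IOERR, STS_RESOURCE,
	      STS_NOTSUPP, STS_NOSPC, STS_TARGET, STS_MEDIUM };

/* Mirrors the shape of blk_path_error(): target-side errors will
 * fail identically on every path, so only path-side errors retry. */
static bool path_error(enum status s)
{
	switch (s) {
	case STS_NOTSUPP:
	case STS_NOSPC:
	case STS_TARGET:
	case STS_MEDIUM:
		return false;	/* retrying another path won't help */
	default:
		return true;	/* could be the path: fail it, retry */
	}
}

int main(void)
{
	printf("IOERR retry=%d, MEDIUM retry=%d\n",
	       path_error(STS_IOERR), path_error(STS_MEDIUM));
	return 0;
}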
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 23f178641794..969c4f1a3633 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -195,9 +195,6 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list) {
if (!best ||
(atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
@@ -210,6 +207,9 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
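The selector change above picks the path with the fewest in-flight I/Os and only then moves that path to the tail, so equally loaded paths rotate fairly instead of the head path being rotated before every scan. A small sketch of select-least-loaded-then-move-to-tail over an array (a linked list in the real code):

#include <stdio.h>
#include <string.h>

struct path { char name[8]; int qlen; };

int main(void)
{
	struct path paths[3] = { {"p0", 2}, {"p1", 0}, {"p2", 0} };
	int n = 3, round;

	for (round = 0; round < 4; round++) {
		int best = 0, i;

		for (i = 1; i < n; i++)		/* least in-flight wins */
			if (paths[i].qlen < paths[best].qlen)
				best = i;

		printf("round %d -> %s\n", round, paths[best].name);
		paths[best].qlen++;		/* I/O now in flight */

		/* move most recently used to the tail for fairness */
		struct path used = paths[best];
		memmove(&paths[best], &paths[best + 1],
			(n - best - 1) * sizeof(used));
		paths[n - 1] = used;
	}
	return 0;
}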
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6319d846e0ad..7ef469e902c6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -29,6 +29,9 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
+/* Global list of all raid sets */
+static LIST_HEAD(raid_sets);
+
static bool devices_handle_discard_safely = false;
/*
@@ -105,8 +108,6 @@ struct raid_dev {
#define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE (1 << __CTR_FLAG_JOURNAL_MODE)
-#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET)
-
/*
* Definitions of various constructor flags to
* be used in checks of valid / invalid flags
@@ -209,6 +210,8 @@ struct raid_dev {
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
#define RT_FLAG_RS_SUSPENDED 5
+#define RT_FLAG_RS_IN_SYNC 6
+#define RT_FLAG_RS_RESYNCING 7
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -224,8 +227,8 @@ struct rs_layout {
struct raid_set {
struct dm_target *ti;
+ struct list_head list;
- uint32_t bitmap_loaded;
uint32_t stripe_cache_entries;
unsigned long ctr_flags;
unsigned long runtime_flags;
@@ -270,6 +273,19 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
mddev->new_chunk_sectors = l->new_chunk_sectors;
}
+/* Find any raid_set in active slot for @rs on global list */
+static struct raid_set *rs_find_active(struct raid_set *rs)
+{
+ struct raid_set *r;
+ struct mapped_device *md = dm_table_get_md(rs->ti->table);
+
+ list_for_each_entry(r, &raid_sets, list)
+ if (r != rs && dm_table_get_md(r->ti->table) == md)
+ return r;
+
+ return NULL;
+}
+
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
@@ -572,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
}
/* Return md raid10 algorithm for @name */
-static int raid10_name_to_format(const char *name)
+static const int raid10_name_to_format(const char *name)
{
if (!strcasecmp(name, "near"))
return ALGORITHM_RAID10_NEAR;
@@ -675,15 +691,11 @@ static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
return NULL;
}
-/*
- * Conditionally change bdev capacity of @rs
- * in case of a disk add/remove reshape
- */
-static void rs_set_capacity(struct raid_set *rs)
+/* Adjust rdev sectors */
+static void rs_set_rdev_sectors(struct raid_set *rs)
{
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
- struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
/*
* raid10 sets rdev->sector to the device size, which
@@ -692,8 +704,16 @@ static void rs_set_capacity(struct raid_set *rs)
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags))
rdev->sectors = mddev->dev_sectors;
+}
- set_capacity(gendisk, mddev->array_sectors);
+/*
+ * Change bdev capacity of @rs in case of a disk add/remove reshape
+ */
+static void rs_set_capacity(struct raid_set *rs)
+{
+ struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
+
+ set_capacity(gendisk, rs->md.array_sectors);
revalidate_disk(gendisk);
}
@@ -744,6 +764,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
mddev_init(&rs->md);
+ INIT_LIST_HEAD(&rs->list);
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
@@ -761,6 +782,9 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
+ /* Add @rs to global list. */
+ list_add(&rs->list, &raid_sets);
+
/*
* Remaining items to be initialized by further RAID params:
* rs->md.persistent
@@ -773,6 +797,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return rs;
}
+/* Free all @rs allocations and remove it from global list. */
static void raid_set_free(struct raid_set *rs)
{
int i;
@@ -790,6 +815,8 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
+ list_del(&rs->list);
+
kfree(rs);
}
@@ -1002,7 +1029,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
!rs->dev[i].rdev.sb_page)
rebuild_cnt++;
- switch (rs->raid_type->level) {
+ switch (rs->md.level) {
case 0:
break;
case 1:
@@ -1017,6 +1044,11 @@ static int validate_raid_redundancy(struct raid_set *rs)
break;
case 10:
copies = raid10_md_layout_to_copies(rs->md.new_layout);
+ if (copies < 2) {
+ DMERR("Bogus raid10 data copies < 2!");
+ return -EINVAL;
+ }
+
if (rebuild_cnt < copies)
break;
@@ -1576,6 +1608,24 @@ static sector_t __rdev_sectors(struct raid_set *rs)
return 0;
}
+/* Check that calculated dev_sectors fits all component devices. */
+static int _check_data_dev_sectors(struct raid_set *rs)
+{
+ sector_t ds = ~0;
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
+ ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+ if (ds < rs->md.dev_sectors) {
+ rs->ti->error = "Component device(s) too small";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
@@ -1625,7 +1675,7 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
- return 0;
+ return _check_data_dev_sectors(rs);
bad:
rs->ti->error = "Target length not divisible by number of data devices";
return -EINVAL;
@@ -1674,8 +1724,11 @@ static void do_table_event(struct work_struct *ws)
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
smp_rmb(); /* Make sure we access most actual mddev properties */
- if (!rs_is_reshaping(rs))
+ if (!rs_is_reshaping(rs)) {
+ if (rs_is_raid10(rs))
+ rs_set_rdev_sectors(rs);
rs_set_capacity(rs);
+ }
dm_table_event(rs->ti->table);
}
@@ -1860,7 +1913,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_takeover_requested(rs))
return false;
- if (!mddev->level)
+ if (rs_is_raid0(rs))
return false;
change = mddev->new_layout != mddev->layout ||
@@ -1868,7 +1921,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
rs->delta_disks;
/* Historical case to support raid1 reshape without delta disks */
- if (mddev->level == 1) {
+ if (rs_is_raid1(rs)) {
if (rs->delta_disks)
return !!rs->delta_disks;
@@ -1876,7 +1929,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
mddev->raid_disks != rs->raid_disks;
}
- if (mddev->level == 10)
+ if (rs_is_raid10(rs))
return change &&
!__is_raid10_far(mddev->new_layout) &&
rs->delta_disks >= 0;
@@ -2340,7 +2393,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
DMERR("new device%s provided without 'rebuild'",
new_devs > 1 ? "s" : "");
return -EINVAL;
- } else if (rs_is_recovering(rs)) {
+ } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
(unsigned long long) mddev->recovery_cp);
return -EINVAL;
@@ -2640,12 +2693,19 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
* Make sure we got a minimum amount of free sectors per device
*/
if (rs->data_offset &&
- to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
+ to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
rs->ti->error = data_offset ? "No space for forward reshape" :
"No space for backward reshape";
return -ENOSPC;
}
out:
+ /*
+ * Raise recovery_cp in case data_offset != 0 to
+ * avoid false recovery positives in the constructor.
+ */
+ if (rs->md.recovery_cp < rs->md.dev_sectors)
+ rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
+
/* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
if (!test_bit(Journal, &rdev->flags)) {
@@ -2682,14 +2742,14 @@ static int rs_setup_takeover(struct raid_set *rs)
sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
if (rt_is_raid10(rs->raid_type)) {
- if (mddev->level == 0) {
+ if (rs_is_raid0(rs)) {
/* Userspace reordered disks -> adjust raid_disk indexes */
__reorder_raid_disk_indexes(rs);
/* raid0 -> raid10_far layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
rs->raid10_copies);
- } else if (mddev->level == 1)
+ } else if (rs_is_raid1(rs))
/* raid1 -> raid10_near layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
rs->raid_disks);
@@ -2777,6 +2837,23 @@ static int rs_prepare_reshape(struct raid_set *rs)
return 0;
}
+/* Get reshape sectors from data_offsets or raid set */
+static sector_t _get_reshape_sectors(struct raid_set *rs)
+{
+ struct md_rdev *rdev;
+ sector_t reshape_sectors = 0;
+
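+ /* One data rdev is enough: the data offset delta is the same on all of them */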
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags)) {
+ reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
+ rdev->data_offset - rdev->new_data_offset :
+ rdev->new_data_offset - rdev->data_offset;
+ break;
+ }
+
+ return max(reshape_sectors, (sector_t) rs->data_offset);
+}
+
/*
*
* - change raid layout
@@ -2788,6 +2865,7 @@ static int rs_setup_reshape(struct raid_set *rs)
{
int r = 0;
unsigned int cur_raid_devs, d;
+ sector_t reshape_sectors = _get_reshape_sectors(rs);
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
@@ -2804,13 +2882,13 @@ static int rs_setup_reshape(struct raid_set *rs)
/*
* Adjust array size:
*
- * - in case of adding disks, array size has
+ * - in case of adding disk(s), array size has
* to grow after the disk adding reshape,
which'll happen in the event handler;
* reshape will happen forward, so space has to
* be available at the beginning of each disk
*
- * - in case of removing disks, array size
+ * - in case of removing disk(s), array size
* has to shrink before starting the reshape,
* which'll happen here;
* reshape will happen backward, so space has to
@@ -2841,7 +2919,7 @@ static int rs_setup_reshape(struct raid_set *rs)
rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
}
- mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
+ mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
/* Remove disk(s) */
} else if (rs->delta_disks < 0) {
@@ -2874,6 +2952,15 @@ static int rs_setup_reshape(struct raid_set *rs)
mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
}
+ /*
+ * Adjust device size for forward reshape
+ * because md_finish_reshape() reduces it.
+ */
+ if (!mddev->reshape_backwards)
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags))
+ rdev->sectors += reshape_sectors;
+
return r;
}
@@ -2890,7 +2977,7 @@ static void configure_discard_support(struct raid_set *rs)
/*
* XXX: RAID level 4,5,6 require zeroing for safety.
*/
- raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
+ raid456 = rs_is_raid456(rs);
for (i = 0; i < rs->raid_disks; i++) {
struct request_queue *q;
@@ -2915,7 +3002,7 @@ static void configure_discard_support(struct raid_set *rs)
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
*/
- ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
+ ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs));
ti->num_discard_bios = 1;
}
@@ -2935,10 +3022,10 @@ static void configure_discard_support(struct raid_set *rs)
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
- bool resize;
+ bool resize = false;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors, rdev_sectors;
+ sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3021,7 +3108,10 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- resize = calculated_dev_sectors != rdev_sectors;
+
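+ /* Don't flag a resize when the size delta is just out-of-place reshape space */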
+ reshape_sectors = _get_reshape_sectors(rs);
+ if (calculated_dev_sectors != rdev_sectors)
+ resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -3105,19 +3195,22 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- /*
- * We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
- * check functions via its MD personality instance.
- *
- * So do the reshape check after md_run() succeeded.
- */
- r = rs_prepare_reshape(rs);
- if (r)
- return r;
+ /* Out-of-place space has to be available to allow for a reshape unless raid1! */
+ if (reshape_sectors || rs_is_raid1(rs)) {
+ /*
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the respective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
+ r = rs_prepare_reshape(rs);
+ if (r)
+ return r;
- /* Reshaping ain't recovery, so disable recovery */
- rs_setup_recovery(rs, MaxSector);
+ /* Reshaping ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ }
rs_set_cur(rs);
} else {
/* May not set recovery when a device rebuild is requested */
@@ -3144,13 +3237,20 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_lock_nointr(&rs->md);
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
-
if (r) {
ti->error = "Failed to run raid array";
mddev_unlock(&rs->md);
goto bad;
}
+ r = md_start(&rs->md);
+
+ if (r) {
+ ti->error = "Failed to start raid array";
+ mddev_unlock(&rs->md);
+ goto bad_md_start;
+ }
+
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
@@ -3198,6 +3298,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_unlock(&rs->md);
return 0;
+bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
@@ -3239,25 +3340,27 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
}
/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev)
+static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
- if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_FROZEN, &recovery))
return "frozen";
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ /* The MD sync thread can be done with io but still be running */
+ if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
+ (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+ (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return "reshape";
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+ if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
return "resync";
- else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ else if (test_bit(MD_RECOVERY_CHECK, &recovery))
return "check";
return "repair";
}
- if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
return "recover";
}
@@ -3274,7 +3377,7 @@ static const char *decipher_sync_action(struct mddev *mddev)
* 'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
* '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
*/
-static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, bool array_in_sync)
+static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
{
if (!rdev->bdev)
return "-";
@@ -3282,85 +3385,108 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
return "D";
else if (test_bit(Journal, &rdev->flags))
return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
- else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
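+ /* 'a' while the set resyncs, or while a leg is out of sync in a not-in-sync set */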
+ else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
+ (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
+ !test_bit(In_sync, &rdev->flags)))
return "a";
else
return "A";
}
-/* Helper to return resync/reshape progress for @rs and @array_in_sync */
-static sector_t rs_get_progress(struct raid_set *rs,
- sector_t resync_max_sectors, bool *array_in_sync)
+/* Helper to return resync/reshape progress for @rs and to set runtime flags for raid set in sync / resyncing */
+static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
+ sector_t resync_max_sectors)
{
- sector_t r, curr_resync_completed;
+ sector_t r;
struct mddev *mddev = &rs->md;
- curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- *array_in_sync = false;
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
if (rs_is_raid0(rs)) {
r = resync_max_sectors;
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- r = mddev->reshape_position;
-
- /* Reshape is relative to the array size */
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
- r != MaxSector) {
- if (r == MaxSector) {
- *array_in_sync = true;
- r = resync_max_sectors;
- } else {
- /* Got to reverse on backward reshape */
- if (mddev->reshape_backwards)
- r = mddev->array_sectors - r;
-
- /* Devide by # of data stripes */
- sector_div(r, mddev_data_stripes(rs));
- }
-
- /* Sync is relative to the component device size */
- } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- r = curr_resync_completed;
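+ /* Prefer the live resync position while sync activity is pending or running */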
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+ test_bit(MD_RECOVERY_RUNNING, &recovery))
+ r = mddev->curr_resync_completed;
else
r = mddev->recovery_cp;
- if ((r == MaxSector) ||
- (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
- (mddev->curr_resync_completed == resync_max_sectors))) {
+ if (r >= resync_max_sectors &&
+ (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
+ (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
+ !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
+ !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
/*
* Sync complete.
*/
- *array_in_sync = true;
- r = resync_max_sectors;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* In case we have finished recovering, the array is in sync. */
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+ /*
+ * In case we are recovering, the array is not in sync
+ * and health chars should show the recovering legs.
+ */
+ ;
+
+ } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "resync" is occurring, the raid set
+ * is or may be out of sync, hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "reshape" is occurring, the raid set
+ * is or may be out of sync, hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
/*
* If "check" or "repair" is occurring, the raid set has
* undergone an initial sync and the health characters
* should not be 'a' anymore.
*/
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
} else {
struct md_rdev *rdev;
/*
+ * We are idle and recovery is needed; prevent an 'A' chars race
+ * caused by components still set to in-sync by the constructor.
+ */
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery))
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ /*
* The raid set may be doing an initial sync, or it may
* be rebuilding individual components. If all the
* devices are In_sync, then it is the raid set that is
* being initialized.
*/
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags))
- *array_in_sync = true;
-#if 0
- r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
-#endif
+ !test_bit(In_sync, &rdev->flags)) {
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ break;
+ }
}
}
- return r;
+ return min(r, resync_max_sectors);
}
/* Helper to return @dev name or "-" if !@dev */
@@ -3376,7 +3502,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
struct mddev *mddev = &rs->md;
struct r5conf *conf = mddev->private;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
- bool array_in_sync;
+ unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
unsigned int sz = 0;
unsigned int rebuild_disks;
@@ -3396,17 +3522,18 @@ static void raid_status(struct dm_target *ti, status_type_t type,
/* Access most recent mddev properties for status output */
smp_rmb();
+ recovery = rs->md.recovery;
/* Get sensible max sectors even if raid set not yet started */
resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
mddev->resync_max_sectors : mddev->dev_sectors;
- progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
+ progress = rs_get_progress(rs, recovery, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = decipher_sync_action(&rs->md);
+ sync_action = decipher_sync_action(&rs->md, recovery);
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
- DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev, array_in_sync));
+ DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
/*
* In-sync/Reshape ratio:
@@ -3457,7 +3584,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* v1.10.0+:
*/
DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
- __raid_dev_status(rs, &rs->journal_dev.rdev, 0) : "-");
+ __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
break;
case STATUSTYPE_TABLE:
@@ -3613,24 +3740,19 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}
-static void raid_presuspend(struct dm_target *ti)
-{
- struct raid_set *rs = ti->private;
-
- md_stop_writes(&rs->md);
-}
-
static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Writes have to be stopped before suspending to avoid deadlocks. */
+ if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
+ md_stop_writes(&rs->md);
+
mddev_lock_nointr(&rs->md);
mddev_suspend(&rs->md);
mddev_unlock(&rs->md);
}
-
- rs->md.ro = 1;
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
@@ -3807,10 +3929,33 @@ static int raid_preresume(struct dm_target *ti)
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- /* This is a resume after a suspend of the set -> it's already started */
+ /* This is a resume after a suspend of the set -> it's already started. */
if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
return 0;
+ if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ struct raid_set *rs_active = rs_find_active(rs);
+
+ if (rs_active) {
+ /*
+ * In case no rebuilds have been requested
+ * and an active table slot exists, copy
+ * current resynchronization completed and
+ * reshape position pointers across from
+ * suspended raid set in the active slot.
+ *
+ * This resumes the new mapping at current
+ * offsets to continue recovery/reshape without
+ * necessarily redoing a raid set partially or
+ * causing data corruption in case of a reshape.
+ */
+ if (rs_active->md.curr_resync_completed != MaxSector)
+ mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
+ if (rs_active->md.reshape_position != MaxSector)
+ mddev->reshape_position = rs_active->md.reshape_position;
+ }
+ }
+
/*
* The superblocks need to be updated on disk if the
* array is new or new devices got added (thus zeroed
@@ -3842,11 +3987,10 @@ static int raid_preresume(struct dm_target *ti)
mddev->resync_min = mddev->recovery_cp;
}
- rs_set_capacity(rs);
-
/* Check for any reshape request unless new raid set */
- if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
/* Initiate a reshape. */
+ rs_set_rdev_sectors(rs);
mddev_lock_nointr(mddev);
r = rs_start_reshape(rs);
mddev_unlock(mddev);
@@ -3872,21 +4016,15 @@ static void raid_resume(struct dm_target *ti)
attempt_restore_of_faulty_devices(rs);
}
- mddev->ro = 0;
- mddev->in_sync = 0;
-
- /*
- * Keep the RAID set frozen if reshape/rebuild flags are set.
- * The RAID set is unfrozen once the next table load/resume,
- * which clears the reshape/rebuild flags, occurs.
- * This ensures that the constructor for the inactive table
- * retrieves an up-to-date reshape_position.
- */
- if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Only reduce raid set size before running a disk removing reshape. */
+ if (mddev->delta_disks < 0)
+ rs_set_capacity(rs);
+
mddev_lock_nointr(mddev);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ mddev->ro = 0;
+ mddev->in_sync = 0;
mddev_resume(mddev);
mddev_unlock(mddev);
}
@@ -3894,7 +4032,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 13, 0},
+ .version = {1, 13, 2},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
@@ -3903,7 +4041,6 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
- .presuspend = raid_presuspend,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d32f25489c2..aeaaaef43eff 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -315,6 +315,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
/* The target wants to requeue the I/O */
dm_requeue_original_request(tio, false);
break;
+ case DM_ENDIO_DELAY_REQUEUE:
+ /* The target wants to requeue the I/O after a delay */
+ dm_requeue_original_request(tio, true);
+ break;
default:
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -395,7 +399,7 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
+static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
blk_status_t r;
@@ -404,9 +408,10 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
- if (r)
+ if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
/* must complete clone in terms of original request */
dm_complete_request(rq, r);
+ return r;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
@@ -476,8 +481,10 @@ static int map_request(struct dm_rq_target_io *tio)
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request *clone = NULL;
+ blk_status_t ret;
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+check_again:
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
@@ -492,7 +499,17 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- dm_dispatch_clone_request(clone, rq);
+ ret = dm_dispatch_clone_request(clone, rq);
+ if (ret == BLK_STS_RESOURCE) {
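+ /* Underlying queue is busy: undo the clone and requeue the original request */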
+ blk_rq_unprep_clone(clone);
+ tio->ti->type->release_clone_rq(clone);
+ tio->clone = NULL;
+ if (!rq->q->mq_ops)
+ r = DM_MAPIO_DELAY_REQUEUE;
+ else
+ r = DM_MAPIO_REQUEUE;
+ goto check_again;
+ }
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
@@ -700,7 +717,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
/* disable dm_old_request_fn's merge heuristic by default */
md->seq_rq_merge_deadline_usecs = 0;
- dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
/* Initialize the request-based DM worker thread */
@@ -713,8 +729,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
return error;
}
- elv_register_queue(md->queue);
-
return 0;
}
@@ -810,17 +824,9 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
err = PTR_ERR(q);
goto out_tag_set;
}
- dm_init_md_queue(md);
-
- /* backfill 'mq' sysfs registration normally done in blk_register_queue */
- err = blk_mq_register_dev(disk_to_dev(md->disk), q);
- if (err)
- goto out_cleanup_queue;
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(q);
out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 7b8642045c55..f006a9005593 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -282,9 +282,6 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list)
if (!best || (st_compare_load(pi, best, nr_bytes) < 0))
best = pi;
@@ -292,6 +289,9 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a0613bd8ed00..216035be5661 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -47,7 +47,7 @@ struct dm_exception_table {
};
struct dm_snapshot {
- struct rw_semaphore lock;
+ struct mutex lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -439,9 +439,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- down_read(&s->lock);
+ mutex_lock(&s->lock);
active = s->active;
- up_read(&s->lock);
+ mutex_unlock(&s->lock);
if (active) {
if (snap_src)
@@ -909,7 +909,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -924,7 +924,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
if (b)
flush_bios(b);
@@ -983,9 +983,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
goto shut;
}
@@ -1026,10 +1026,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1071,10 +1071,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1173,7 +1173,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
- init_rwsem(&s->lock);
+ mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1338,9 +1338,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- down_write(&snap_dest->lock);
+ mutex_lock(&snap_dest->lock);
snap_dest->valid = 0;
- up_write(&snap_dest->lock);
+ mutex_unlock(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1371,6 +1371,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ mutex_destroy(&s->lock);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
@@ -1458,7 +1460,7 @@ static void pending_complete(void *context, int success)
if (!success) {
/* Read/write error - snapshot is unusable */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -EIO);
error = 1;
goto out;
@@ -1466,14 +1468,14 @@ static void pending_complete(void *context, int success)
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
error = 1;
goto out;
}
*e = pe->e;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid) {
free_completed_exception(e);
error = 1;
@@ -1498,7 +1500,7 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@@ -1694,7 +1696,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
/* FIXME: should only take write lock if we need
* to copy an exception */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1717,9 +1719,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
pe = alloc_pending_exception(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
@@ -1754,7 +1756,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_full_bio(pe, bio);
goto out;
}
@@ -1764,7 +1766,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_copy(pe);
goto out;
}
@@ -1774,7 +1776,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
out:
return r;
}
@@ -1810,7 +1812,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1841,12 +1843,12 @@ redirect_to_origin:
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return do_origin(s->origin, bio);
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return r;
}
@@ -1878,7 +1880,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_read(&snap_src->lock);
+ mutex_lock(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1888,7 +1890,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- up_read(&snap_src->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1934,11 +1936,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_write(&snap_src->lock);
- down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&snap_src->lock);
+ mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- up_write(&snap_dest->lock);
- up_write(&snap_src->lock);
+ mutex_unlock(&snap_dest->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1953,9 +1955,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->active = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -1995,7 +1997,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2020,7 +2022,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
break;
@@ -2086,7 +2088,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
@@ -2113,9 +2115,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
pe = alloc_pending_exception(snap);
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid) {
free_pending_exception(pe);
@@ -2158,7 +2160,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 29bc51084c82..56059fb56e2d 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -228,6 +228,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
dm_stat_free(&s->rcu_head);
}
free_percpu(stats->last);
+ mutex_destroy(&stats->mutex);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aaffd0c0ee9a..5fe7ec356c33 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -866,7 +866,8 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
- table_type == DM_TYPE_DAX_BIO_BASED);
+ table_type == DM_TYPE_DAX_BIO_BASED ||
+ table_type == DM_TYPE_NVME_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -909,13 +910,33 @@ static bool dm_table_supports_dax(struct dm_table *t)
return true;
}
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
+
+struct verify_rq_based_data {
+ unsigned sq_count;
+ unsigned mq_count;
+};
+
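+/* Count legacy (sq) vs blk-mq devices while verifying each one can stack requests */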
+static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct verify_rq_based_data *v = data;
+
+ if (q->mq_ops)
+ v->mq_count++;
+ else
+ v->sq_count++;
+
+ return queue_is_rq_based(q);
+}
+
static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- unsigned sq_count = 0, mq_count = 0;
+ struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
struct dm_target *tgt;
- struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -923,6 +944,14 @@ static int dm_table_determine_type(struct dm_table *t)
/* target already set the table's type */
if (t->type == DM_TYPE_BIO_BASED)
return 0;
+ else if (t->type == DM_TYPE_NVME_BIO_BASED) {
+ if (!dm_table_does_not_support_partial_completion(t)) {
+ DMERR("nvme bio-based is only possible with devices"
+ " that don't support partial completion");
+ return -EINVAL;
+ }
+ /* Fallthru, also verify all devices are blk-mq */
+ }
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
goto verify_rq_based;
}
@@ -937,8 +966,8 @@ static int dm_table_determine_type(struct dm_table *t)
bio_based = 1;
if (bio_based && request_based) {
- DMWARN("Inconsistent table: different target types"
- " can't be mixed up");
+ DMERR("Inconsistent table: different target types"
+ " can't be mixed up");
return -EINVAL;
}
}
@@ -959,8 +988,18 @@ static int dm_table_determine_type(struct dm_table *t)
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
if (dm_table_supports_dax(t) ||
- (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
+ } else {
+ /* Check if upgrading to NVMe bio-based is valid or required */
+ tgt = dm_table_get_immutable_target(t);
+ if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
+ } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ }
+ }
return 0;
}
@@ -980,7 +1019,8 @@ verify_rq_based:
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
- DMWARN("Request-based dm doesn't support multiple targets yet");
+ DMERR("%s DM doesn't support multiple targets",
+ t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
return -EINVAL;
}
@@ -997,28 +1037,29 @@ verify_rq_based:
return 0;
}
- /* Non-request-stackable devices can't be used for request-based dm */
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
-
- if (!queue_is_rq_based(q)) {
- DMERR("table load rejected: including"
- " non-request-stackable devices");
- return -EINVAL;
- }
+ tgt = dm_table_get_immutable_target(t);
+ if (!tgt) {
+ DMERR("table load rejected: immutable target is required");
+ return -EINVAL;
+ } else if (tgt->max_io_len) {
+ DMERR("table load rejected: immutable target that splits IO is not supported");
+ return -EINVAL;
+ }
- if (q->mq_ops)
- mq_count++;
- else
- sq_count++;
+ /* Non-request-stackable devices can't be used for request-based dm */
+ if (!tgt->type->iterate_devices ||
+ !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+ DMERR("table load rejected: including non-request-stackable devices");
+ return -EINVAL;
}
- if (sq_count && mq_count) {
+ if (v.sq_count && v.mq_count) {
DMERR("table load rejected: not all devices are blk-mq request-stackable");
return -EINVAL;
}
- t->all_blk_mq = mq_count > 0;
+ t->all_blk_mq = v.mq_count > 0;
- if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ if (!t->all_blk_mq &&
+ (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
DMERR("table load rejected: all devices are not blk-mq request-stackable");
return -EINVAL;
}
@@ -1079,7 +1120,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
{
enum dm_queue_mode type = dm_table_get_type(t);
unsigned per_io_data_size = 0;
- struct dm_target *tgt;
+ unsigned min_pool_size = 0;
+ struct dm_target *ti;
unsigned i;
if (unlikely(type == DM_TYPE_NONE)) {
@@ -1089,11 +1131,13 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
if (__table_type_bio_based(type))
for (i = 0; i < t->num_targets; i++) {
- tgt = t->targets + i;
- per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
+ ti = t->targets + i;
+ per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+ min_pool_size = max(min_pool_size, ti->num_flush_bios);
}
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
+ per_io_data_size, min_pool_size);
if (!t->mempools)
return -ENOMEM;
@@ -1705,6 +1749,20 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return true;
}
+static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ char b[BDEVNAME_SIZE];
+
+ /* For now, NVMe devices are the only devices of this class */
+ return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
+}
+
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
+{
+ return dm_table_all_devices_attribute(t, device_no_partial_completion);
+}
+
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1820,6 +1878,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
if (dm_table_supports_dax_write_cache(t))
dax_write_cache(t->md->dax_dev, true);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index f91d771fff4b..629c555890c1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -492,6 +492,11 @@ static void pool_table_init(void)
INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}
+static void pool_table_exit(void)
+{
+ mutex_destroy(&dm_thin_pool_table.mutex);
+}
+
static void __pool_table_insert(struct pool *pool)
{
BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
@@ -1717,7 +1722,7 @@ static void __remap_and_issue_shared_cell(void *context,
bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
- struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
inc_all_io_entry(info->tc->pool, bio);
@@ -4387,6 +4392,8 @@ static void dm_thin_exit(void)
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
+
+ pool_table_exit();
}
module_init(dm_thin_init);
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
new file mode 100644
index 000000000000..65f838fa2e99
--- /dev/null
+++ b/drivers/md/dm-unstripe.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2017 Intel Corporation.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/device-mapper.h>
+
+struct unstripe_c {
+ struct dm_dev *dev;
+ sector_t physical_start;
+
+ uint32_t stripes;
+
+ uint32_t unstripe;
+ sector_t unstripe_width;
+ sector_t unstripe_offset;
+
+ uint32_t chunk_size;
+ u8 chunk_shift;
+};
+
+#define DM_MSG_PREFIX "unstriped"
+
+static void cleanup_unstripe(struct unstripe_c *uc, struct dm_target *ti)
+{
+ if (uc->dev)
+ dm_put_device(ti, uc->dev);
+ kfree(uc);
+}
+
+/*
+ * Construct an unstriped mapping.
+ * <number of stripes> <chunk size> <stripe #> <dev_path> <offset>
+ */
+static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct unstripe_c *uc;
+ sector_t tmp_len;
+ unsigned long long start;
+ char dummy;
+
+ if (argc != 5) {
+ ti->error = "Invalid number of arguments";
+ return -EINVAL;
+ }
+
+ uc = kzalloc(sizeof(*uc), GFP_KERNEL);
+ if (!uc) {
+ ti->error = "Memory allocation for unstriped context failed";
+ return -ENOMEM;
+ }
+
+ if (kstrtouint(argv[0], 10, &uc->stripes) || !uc->stripes) {
+ ti->error = "Invalid stripe count";
+ goto err;
+ }
+
+ if (kstrtouint(argv[1], 10, &uc->chunk_size) || !uc->chunk_size) {
+ ti->error = "Invalid chunk_size";
+ goto err;
+ }
+
+ /* FIXME: must support non power of 2 chunk_size, dm-stripe.c does */
+ if (!is_power_of_2(uc->chunk_size)) {
+ ti->error = "Non power of 2 chunk_size is not supported yet";
+ goto err;
+ }
+
+ if (kstrtouint(argv[2], 10, &uc->unstripe)) {
+ ti->error = "Invalid stripe number";
+ goto err;
+ }
+
+ if (uc->unstripe > uc->stripes && uc->stripes > 1) {
+ ti->error = "Please provide stripe between [0, # of stripes]";
+ goto err;
+ }
+
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &uc->dev)) {
+ ti->error = "Couldn't get striped device";
+ goto err;
+ }
+
+ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
+ ti->error = "Invalid striped device offset";
+ goto err;
+ }
+ uc->physical_start = start;
+
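+ /* chunk_size is a power of 2 (checked above), so mapping can shift instead of divide */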
+ uc->unstripe_offset = uc->unstripe * uc->chunk_size;
+ uc->unstripe_width = (uc->stripes - 1) * uc->chunk_size;
+ uc->chunk_shift = fls(uc->chunk_size) - 1;
+
+ tmp_len = ti->len;
+ if (sector_div(tmp_len, uc->chunk_size)) {
+ ti->error = "Target length not divisible by chunk size";
+ goto err;
+ }
+
+ if (dm_set_target_max_io_len(ti, uc->chunk_size)) {
+ ti->error = "Failed to set max io len";
+ goto err;
+ }
+
+ ti->private = uc;
+ return 0;
+err:
+ cleanup_unstripe(uc, ti);
+ return -EINVAL;
+}
+
+static void unstripe_dtr(struct dm_target *ti)
+{
+ struct unstripe_c *uc = ti->private;
+
+ cleanup_unstripe(uc, ti);
+}
+
+static sector_t map_to_core(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+ sector_t sector = bio->bi_iter.bi_sector;
+
+ /* Shift us up to the right "row" on the stripe */
+ sector += uc->unstripe_width * (sector >> uc->chunk_shift);
+
+ /* Account for what stripe we're operating on */
+ sector += uc->unstripe_offset;
+
+ return sector;
+}
+
+static int unstripe_map(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+
+ bio_set_dev(bio, uc->dev->bdev);
+ bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void unstripe_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct unstripe_c *uc = ti->private;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%d %llu %d %s %llu",
+ uc->stripes, (unsigned long long)uc->chunk_size, uc->unstripe,
+ uc->dev->name, (unsigned long long)uc->physical_start);
+ break;
+ }
+}
+
+static int unstripe_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct unstripe_c *uc = ti->private;
+
+ return fn(ti, uc->dev, uc->physical_start, ti->len, data);
+}
+
+static void unstripe_io_hints(struct dm_target *ti,
+ struct queue_limits *limits)
+{
+ struct unstripe_c *uc = ti->private;
+
+ limits->chunk_sectors = uc->chunk_size;
+}
+
+static struct target_type unstripe_target = {
+ .name = "unstriped",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = unstripe_ctr,
+ .dtr = unstripe_dtr,
+ .map = unstripe_map,
+ .status = unstripe_status,
+ .iterate_devices = unstripe_iterate_devices,
+ .io_hints = unstripe_io_hints,
+};
+
+static int __init dm_unstripe_init(void)
+{
+ int r;
+
+ r = dm_register_target(&unstripe_target);
+ if (r < 0)
+ DMERR("target registration failed");
+
+ return r;
+}
+
+static void __exit dm_unstripe_exit(void)
+{
+ dm_unregister_target(&unstripe_target);
+}
+
+module_init(dm_unstripe_init);
+module_exit(dm_unstripe_exit);
+
+MODULE_DESCRIPTION(DM_NAME " unstriped target");
+MODULE_AUTHOR("Scott Bauer <scott.bauer@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 70485de37b66..969954915566 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2333,6 +2333,9 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
/* Free the zone descriptors */
dmz_drop_zones(zmd);
+
+ mutex_destroy(&zmd->mblk_flush_lock);
+ mutex_destroy(&zmd->map_lock);
}
/*
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6d7bda6f8190..caff02caf083 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -827,6 +827,7 @@ err_fwq:
err_cwq:
destroy_workqueue(dmz->chunk_wq);
err_bio:
+ mutex_destroy(&dmz->chunk_lock);
bioset_free(dmz->bio_set);
err_meta:
dmz_dtr_metadata(dmz->metadata);
@@ -861,6 +862,8 @@ static void dmz_dtr(struct dm_target *ti)
dmz_put_zoned_device(ti);
+ mutex_destroy(&dmz->chunk_lock);
+
kfree(dmz);
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index de17b7193299..d6de00f367ef 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,18 +60,73 @@ void dm_issue_global_event(void)
}
/*
- * One of these is allocated per bio.
+ * One of these is allocated (on-stack) per original bio.
*/
+struct clone_info {
+ struct dm_table *map;
+ struct bio *bio;
+ struct dm_io *io;
+ sector_t sector;
+ unsigned sector_count;
+};
+
+/*
+ * One of these is allocated per clone bio.
+ */
+#define DM_TIO_MAGIC 7282014
+struct dm_target_io {
+ unsigned magic;
+ struct dm_io *io;
+ struct dm_target *ti;
+ unsigned target_bio_nr;
+ unsigned *len_ptr;
+ bool inside_dm_io;
+ struct bio clone;
+};
+
+/*
+ * One of these is allocated per original bio.
+ * It contains the first clone used for that original.
+ */
+#define DM_IO_MAGIC 5191977
struct dm_io {
+ unsigned magic;
struct mapped_device *md;
blk_status_t status;
atomic_t io_count;
- struct bio *bio;
+ struct bio *orig_bio;
unsigned long start_time;
spinlock_t endio_lock;
struct dm_stats_aux stats_aux;
+ /* last member of dm_target_io is 'struct bio' */
+ struct dm_target_io tio;
};
+void *dm_per_bio_data(struct bio *bio, size_t data_size)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
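+ /* Per-bio data is allocated immediately in front of the dm_target_io (and its dm_io when embedded) */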
+ if (!tio->inside_dm_io)
+ return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
+ return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
+}
+EXPORT_SYMBOL_GPL(dm_per_bio_data);
+
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+{
+ struct dm_io *io = (struct dm_io *)((char *)data + data_size);
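+ /* data precedes either a dm_io or a bare dm_target_io; the magic field tells which */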
+ if (io->magic == DM_IO_MAGIC)
+ return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
+ BUG_ON(io->magic != DM_TIO_MAGIC);
+ return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
+}
+EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
+
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
+{
+ return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
+}
+EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
+
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -93,8 +148,8 @@ static int dm_numa_node = DM_NUMA_NODE;
* For mempools pre-allocation at the table loading time.
*/
struct dm_md_mempools {
- mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *io_bs;
};
struct table_device {
@@ -103,7 +158,6 @@ struct table_device {
struct dm_dev dm_dev;
};
-static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;
@@ -170,14 +224,9 @@ static int __init local_init(void)
{
int r = -ENOMEM;
- /* allocate a slab for the dm_ios */
- _io_cache = KMEM_CACHE(dm_io, 0);
- if (!_io_cache)
- return r;
-
_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
if (!_rq_tio_cache)
- goto out_free_io_cache;
+ return r;
_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
__alignof__(struct request), 0, NULL);
@@ -212,8 +261,6 @@ out_free_rq_cache:
kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
-out_free_io_cache:
- kmem_cache_destroy(_io_cache);
return r;
}
@@ -225,7 +272,6 @@ static void local_exit(void)
kmem_cache_destroy(_rq_cache);
kmem_cache_destroy(_rq_tio_cache);
- kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
dm_uevent_exit();
@@ -486,18 +532,69 @@ out:
return r;
}
-static struct dm_io *alloc_io(struct mapped_device *md)
+static void start_io_acct(struct dm_io *io);
+
+static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
- return mempool_alloc(md->io_pool, GFP_NOIO);
+ struct dm_io *io;
+ struct dm_target_io *tio;
+ struct bio *clone;
+
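+ /* The io_bs bioset front-pads each bio with a struct dm_io; the bio is the embedded tio's last member */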
+ clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = true;
+ tio->io = NULL;
+
+ io = container_of(tio, struct dm_io, tio);
+ io->magic = DM_IO_MAGIC;
+ io->status = 0;
+ atomic_set(&io->io_count, 1);
+ io->orig_bio = bio;
+ io->md = md;
+ spin_lock_init(&io->endio_lock);
+
+ start_io_acct(io);
+
+ return io;
}
static void free_io(struct mapped_device *md, struct dm_io *io)
{
- mempool_free(io, md->io_pool);
+ bio_put(&io->tio.clone);
+}
+
+static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+ unsigned target_bio_nr, gfp_t gfp_mask)
+{
+ struct dm_target_io *tio;
+
+ if (!ci->io->tio.io) {
+ /* the dm_target_io embedded in ci->io is available */
+ tio = &ci->io->tio;
+ } else {
+ struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = false;
+ }
+
+ tio->magic = DM_TIO_MAGIC;
+ tio->io = ci->io;
+ tio->ti = ti;
+ tio->target_bio_nr = target_bio_nr;
+
+ return tio;
}
static void free_tio(struct dm_target_io *tio)
{
+ if (tio->inside_dm_io)
+ return;
bio_put(&tio->clone);
}
@@ -510,17 +607,15 @@ int md_in_flight(struct mapped_device *md)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
- int cpu;
+ struct bio *bio = io->orig_bio;
int rw = bio_data_dir(bio);
io->start_time = jiffies;
- cpu = part_stat_lock();
- part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
- part_stat_unlock();
+ generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
+
atomic_set(&dm_disk(md)->part0.in_flight[rw],
- atomic_inc_return(&md->pending[rw]));
+ atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -531,7 +626,7 @@ static void start_io_acct(struct dm_io *io)
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
+ struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
int pending;
int rw = bio_data_dir(bio);
@@ -752,15 +847,6 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
return 0;
}
-/*-----------------------------------------------------------------
- * CRUD START:
- * A more elegant soln is in the works that uses the queue
- * merge fn, unfortunately there are a couple of changes to
- * the block layer that I want to make for this. So in the
- * interests of getting something for people to use I give
- * you this clearly demarcated crap.
- *---------------------------------------------------------------*/
-
static int __noflush_suspending(struct mapped_device *md)
{
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -780,8 +866,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
- if (!(io->status == BLK_STS_DM_REQUEUE &&
- __noflush_suspending(md)))
+ if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
io->status = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
@@ -793,7 +878,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
+ /* NOTE early return due to BLK_STS_DM_REQUEUE below */
+ bio_list_add_head(&md->deferred, io->orig_bio);
else
/* noflush suspend was interrupted. */
io->status = BLK_STS_IOERR;
@@ -801,7 +887,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
io_error = io->status;
- bio = io->bio;
+ bio = io->orig_bio;
end_io_acct(io);
free_io(md, io);
@@ -847,7 +933,7 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (unlikely(error == BLK_STS_TARGET)) {
+ if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
@@ -920,7 +1006,15 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- ti->max_io_len = (uint32_t) len;
+ /*
+ * BIO based queue uses its own splitting. When multipage bvecs
+ * are switched on, the size of the incoming bio may be too big to
+ * be handled in some targets, such as crypt.
+ *
+ * When these targets are ready for the big bio, we can remove
+ * the limit.
+ */
+ ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
return 0;
}
@@ -997,7 +1091,7 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH.
+ * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1047,7 +1141,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->bio;
+ struct bio *report_bio = tio->io->orig_bio;
struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
unsigned int nr_rep = 0;
@@ -1114,67 +1208,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
-/*
- * Flush current->bio_list when the target map method blocks.
- * This fixes deadlocks in snapshot and possibly in other targets.
- */
-struct dm_offload {
- struct blk_plug plug;
- struct blk_plug_cb cb;
-};
-
-static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct dm_offload *o = container_of(cb, struct dm_offload, cb);
- struct bio_list list;
- struct bio *bio;
- int i;
-
- INIT_LIST_HEAD(&o->cb.list);
-
- if (unlikely(!current->bio_list))
- return;
-
- for (i = 0; i < 2; i++) {
- list = current->bio_list[i];
- bio_list_init(&current->bio_list[i]);
-
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set ||
- !bs->rescue_workqueue) {
- bio_list_add(&current->bio_list[i], bio);
- continue;
- }
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
- }
- }
-}
-
-static void dm_offload_start(struct dm_offload *o)
-{
- blk_start_plug(&o->plug);
- o->cb.callback = flush_current_bio_list;
- list_add(&o->cb.list, &current->plug->cb_list);
-}
-
-static void dm_offload_end(struct dm_offload *o)
-{
- list_del(&o->cb.list);
- blk_finish_plug(&o->plug);
-}
-
-static void __map_bio(struct dm_target_io *tio)
+static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
- struct dm_offload o;
struct bio *clone = &tio->clone;
+ struct dm_io *io = tio->io;
+ struct mapped_device *md = io->md;
struct dm_target *ti = tio->ti;
+ blk_qc_t ret = BLK_QC_T_NONE;
clone->bi_end_io = clone_endio;
@@ -1183,44 +1225,37 @@ static void __map_bio(struct dm_target_io *tio)
* anything, the target has assumed ownership of
* this io.
*/
- atomic_inc(&tio->io->io_count);
+ atomic_inc(&io->io_count);
sector = clone->bi_iter.bi_sector;
- dm_offload_start(&o);
r = ti->type->map(ti, clone);
- dm_offload_end(&o);
-
switch (r) {
case DM_MAPIO_SUBMITTED:
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
- bio_dev(tio->io->bio), sector);
- generic_make_request(clone);
+ bio_dev(io->orig_bio), sector);
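+		/*
+		 * NVMe bio-based targets are immutable singletons that
+		 * never split bios (see __process_bio()), so the clone
+		 * can be sent straight to the underlying queue via
+		 * direct_make_request().
+		 */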
+ if (md->type == DM_TYPE_NVME_BIO_BASED)
+ ret = direct_make_request(clone);
+ else
+ ret = generic_make_request(clone);
break;
case DM_MAPIO_KILL:
- dec_pending(tio->io, BLK_STS_IOERR);
free_tio(tio);
+ dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
- dec_pending(tio->io, BLK_STS_DM_REQUEUE);
free_tio(tio);
+ dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
-}
-struct clone_info {
- struct mapped_device *md;
- struct dm_table *map;
- struct bio *bio;
- struct dm_io *io;
- sector_t sector;
- unsigned sector_count;
-};
+ return ret;
+}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
@@ -1264,28 +1299,49 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return 0;
}
-static struct dm_target_io *alloc_tio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr)
+static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ struct dm_target *ti, unsigned num_bios)
{
struct dm_target_io *tio;
- struct bio *clone;
+ int try;
- clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
- tio = container_of(clone, struct dm_target_io, clone);
+ if (!num_bios)
+ return;
- tio->io = ci->io;
- tio->ti = ti;
- tio->target_bio_nr = target_bio_nr;
+ if (num_bios == 1) {
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ bio_list_add(blist, &tio->clone);
+ return;
+ }
- return tio;
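+	/*
+	 * Try to allocate all num_bios without blocking (GFP_NOWAIT)
+	 * first; if that fails, free the partial set and retry with
+	 * GFP_NOIO, serialized by table_devices_lock.
+	 */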
+ for (try = 0; try < 2; try++) {
+ int bio_nr;
+ struct bio *bio;
+
+ if (try)
+ mutex_lock(&ci->io->md->table_devices_lock);
+ for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
+ tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
+ if (!tio)
+ break;
+
+ bio_list_add(blist, &tio->clone);
+ }
+ if (try)
+ mutex_unlock(&ci->io->md->table_devices_lock);
+ if (bio_nr == num_bios)
+ return;
+
+ while ((bio = bio_list_pop(blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ free_tio(tio);
+ }
+ }
}
-static void __clone_and_map_simple_bio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr, unsigned *len)
+static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+ struct dm_target_io *tio, unsigned *len)
{
- struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
struct bio *clone = &tio->clone;
tio->len_ptr = len;
@@ -1294,16 +1350,22 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
if (len)
bio_setup_sector(clone, ci->sector, *len);
- __map_bio(tio);
+ return __map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, unsigned *len)
{
- unsigned target_bio_nr;
+ struct bio_list blist = BIO_EMPTY_LIST;
+ struct bio *bio;
+ struct dm_target_io *tio;
+
+ alloc_multiple_bios(&blist, ci, ti, num_bios);
- for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
- __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
+ while ((bio = bio_list_pop(&blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ (void) __clone_and_map_simple_bio(ci, tio, len);
+ }
}
static int __send_empty_flush(struct clone_info *ci)
@@ -1319,32 +1381,22 @@ static int __send_empty_flush(struct clone_info *ci)
}
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned *len)
+ sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
- unsigned target_bio_nr;
- unsigned num_target_bios = 1;
- int r = 0;
+ int r;
- /*
- * Does the target want to receive duplicate copies of the bio?
- */
- if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
- num_target_bios = ti->num_write_bios(ti, bio);
-
- for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, target_bio_nr);
- tio->len_ptr = len;
- r = clone_bio(tio, bio, sector, *len);
- if (r < 0) {
- free_tio(tio);
- break;
- }
- __map_bio(tio);
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ tio->len_ptr = len;
+ r = clone_bio(tio, bio, sector, *len);
+ if (r < 0) {
+ free_tio(tio);
+ return r;
}
+ (void) __map_bio(tio);
- return r;
+ return 0;
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
@@ -1371,56 +1423,50 @@ static bool is_split_required_for_discard(struct dm_target *ti)
return ti->split_discard_bios;
}
-static int __send_changing_extent_only(struct clone_info *ci,
+static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
get_num_bios_fn get_num_bios,
is_split_required_fn is_split_required)
{
- struct dm_target *ti;
unsigned len;
unsigned num_bios;
- do {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- /*
- * Even though the device advertised support for this type of
- * request, that does not mean every target supports it, and
- * reconfiguration might also have changed that since the
- * check was performed.
- */
- num_bios = get_num_bios ? get_num_bios(ti) : 0;
- if (!num_bios)
- return -EOPNOTSUPP;
+ /*
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
+ * check was performed.
+ */
+ num_bios = get_num_bios ? get_num_bios(ti) : 0;
+ if (!num_bios)
+ return -EOPNOTSUPP;
- if (is_split_required && !is_split_required(ti))
- len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
- else
- len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
+ if (is_split_required && !is_split_required(ti))
+ len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ else
+ len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
- __send_duplicate_bios(ci, ti, num_bios, &len);
+ __send_duplicate_bios(ci, ti, num_bios, &len);
- ci->sector += len;
- } while (ci->sector_count -= len);
+ ci->sector += len;
+ ci->sector_count -= len;
return 0;
}
-static int __send_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_discard_bios,
+ return __send_changing_extent_only(ci, ti, get_num_discard_bios,
is_split_required_for_discard);
}
-static int __send_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL);
}
-static int __send_write_zeroes(struct clone_info *ci)
+static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
}
/*
@@ -1433,17 +1479,17 @@ static int __split_and_process_non_flush(struct clone_info *ci)
unsigned len;
int r;
- if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
- return __send_discard(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- return __send_write_same(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
- return __send_write_zeroes(ci);
-
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+ return __send_discard(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
+ return __send_write_same(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
+ return __send_write_zeroes(ci, ti);
+
if (bio_op(bio) == REQ_OP_ZONE_REPORT)
len = ci->sector_count;
else
@@ -1460,34 +1506,33 @@ static int __split_and_process_non_flush(struct clone_info *ci)
return 0;
}
+static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ ci->map = map;
+ ci->io = alloc_io(md, bio);
+ ci->sector = bio->bi_iter.bi_sector;
+}
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static void __split_and_process_bio(struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
+static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
if (unlikely(!map)) {
bio_io_error(bio);
- return;
+ return ret;
}
- ci.map = map;
- ci.md = md;
- ci.io = alloc_io(md);
- ci.io->status = 0;
- atomic_set(&ci.io->io_count, 1);
- ci.io->bio = bio;
- ci.io->md = md;
- spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_iter.bi_sector;
-
- start_io_acct(ci.io);
+ init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- ci.bio = &ci.md->flush_bio;
+ ci.bio = &ci.io->md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
@@ -1498,32 +1543,95 @@ static void __split_and_process_bio(struct mapped_device *md,
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- while (ci.sector_count && !error)
+ while (ci.sector_count && !error) {
error = __split_and_process_non_flush(&ci);
+ if (current->bio_list && ci.sector_count && !error) {
+ /*
+ * Remainder must be passed to generic_make_request()
+ * so that it gets handled *after* bios already submitted
+ * have been completely processed.
+ * We take a clone of the original to store in
+ * ci.io->orig_bio to be used by end_io_acct() and
+ * for dec_pending to use for completion handling.
+ * As this path is not used for REQ_OP_ZONE_REPORT,
+ * the usage of io->orig_bio in dm_remap_zone_report()
+ * won't be affected by this reassignment.
+ */
+ struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
+ md->queue->bio_split);
+ ci.io->orig_bio = b;
+ bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
+ bio_chain(b, bio);
+ ret = generic_make_request(bio);
+ break;
+ }
+ }
}
/* drop the extra reference count */
dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
}
-/*-----------------------------------------------------------------
- * CRUD END
- *---------------------------------------------------------------*/
/*
- * The request function that just remaps the bio built up by
- * dm_merge_bvec.
+ * Optimized variant of __split_and_process_bio that leverages the
+ * fact that targets that use it never need to split bios.
*/
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t __process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
+ int error = 0;
+
+ if (unlikely(!map)) {
+ bio_io_error(bio);
+ return ret;
+ }
+
+ init_clone_info(&ci, md, map, bio);
+
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ ci.bio = &ci.io->md->flush_bio;
+ ci.sector_count = 0;
+ error = __send_empty_flush(&ci);
+ /* dec_pending submits any data associated with flush */
+ } else {
+ struct dm_target *ti = md->immutable_target;
+ struct dm_target_io *tio;
+
+ /*
+ * Defend against IO still getting in during teardown
+ * - as was seen for a time with nvme-fcloop
+ */
+ if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
+ error = -EIO;
+ goto out;
+ }
+
+ tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
+ ci.bio = bio;
+ ci.sector_count = bio_sectors(bio);
+ ret = __clone_and_map_simple_bio(&ci, tio, NULL);
+ }
+out:
+ /* drop the extra reference count */
+ dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
+}
+
+typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
+
+static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
+ process_bio_fn process_bio)
{
- int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0);
-
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
@@ -1532,12 +1640,27 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
queue_io(md, bio);
else
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return ret;
}
- __split_and_process_bio(md, map, bio);
+ ret = process_bio(md, map, bio);
+
dm_put_live_table(md, srcu_idx);
- return BLK_QC_T_NONE;
+ return ret;
+}
+
+/*
+ * The request function that remaps the bio to one target and
+ * splits off any remainder.
+ */
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __split_and_process_bio);
+}
+
+static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __process_bio);
}
static int dm_any_congested(void *congested_data, int bdi_bits)
@@ -1618,20 +1741,9 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
-void dm_init_md_queue(struct mapped_device *md)
-{
- /*
- * Initialize data that will only be used by a non-blk-mq DM queue
- * - must do so here (in alloc_dev callchain) before queue is used
- */
- md->queue->queuedata = md;
- md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
- dm_init_md_queue(md);
/*
* Initialize aspects of queue that aren't relevant for blk-mq
@@ -1645,9 +1757,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
destroy_workqueue(md->wq);
if (md->kworker_task)
kthread_stop(md->kworker_task);
- mempool_destroy(md->io_pool);
if (md->bs)
bioset_free(md->bs);
+ if (md->io_bs)
+ bioset_free(md->io_bs);
if (md->dax_dev) {
kill_dax(md->dax_dev);
@@ -1673,6 +1786,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->bdev = NULL;
}
+ mutex_destroy(&md->suspend_lock);
+ mutex_destroy(&md->type_lock);
+ mutex_destroy(&md->table_devices_lock);
+
dm_mq_cleanup_mapped_device(md);
}
@@ -1726,10 +1843,10 @@ static struct mapped_device *alloc_dev(int minor)
md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
if (!md->queue)
goto bad;
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info->congested_data = md;
- dm_init_md_queue(md);
-
- md->disk = alloc_disk_node(1, numa_node_id);
+ md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
goto bad;
@@ -1753,7 +1870,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
md->dax_dev = dax_dev;
- add_disk(md->disk);
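+	/* defer queue registration until dm_setup_md_queue() has set the limits */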
+ add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -1812,17 +1929,22 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->bs) {
- /* The md already has necessary mempools. */
- if (dm_table_bio_based(t)) {
- /*
- * Reload bioset because front_pad may have changed
- * because a different table was loaded.
- */
+ if (dm_table_bio_based(t)) {
+ /*
+ * The md may already have mempools that need changing.
+ * If so, reload bioset because front_pad may have changed
+ * because a different table was loaded.
+ */
+ if (md->bs) {
bioset_free(md->bs);
- md->bs = p->bs;
- p->bs = NULL;
+ md->bs = NULL;
}
+ if (md->io_bs) {
+ bioset_free(md->io_bs);
+ md->io_bs = NULL;
+ }
+
+ } else if (md->bs) {
/*
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
@@ -1834,13 +1956,12 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out;
}
- BUG_ON(!p || md->io_pool || md->bs);
+ BUG_ON(!p || md->bs || md->io_bs);
- md->io_pool = p->io_pool;
- p->io_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
-
+ md->io_bs = p->io_bs;
+ p->io_bs = NULL;
out:
/* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
@@ -1886,6 +2007,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
{
struct dm_table *old_map;
struct request_queue *q = md->queue;
+ bool request_based = dm_table_request_based(t);
sector_t size;
lockdep_assert_held(&md->suspend_lock);
@@ -1909,12 +2031,15 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t)) {
+ if (request_based)
dm_stop_queue(q);
+
+ if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
/*
- * Leverage the fact that request-based DM targets are
- * immutable singletons and establish md->immutable_target
- * - used to optimize both dm_request_fn and dm_mq_queue_rq
+ * Leverage the fact that request-based DM targets and
+ * NVMe bio based targets are immutable singletons
+	 * - used to optimize dm_request_fn, dm_mq_queue_rq and
+	 * __process_bio.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
@@ -1954,13 +2079,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
*/
int dm_create(int minor, struct mapped_device **result)
{
+ int r;
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
- dm_sysfs_init(md);
+ r = dm_sysfs_init(md);
+ if (r) {
+ free_dev(md);
+ return r;
+ }
*result = md;
return 0;
@@ -2013,10 +2143,12 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
+ struct queue_limits limits;
enum dm_queue_mode type = dm_get_md_type(md);
switch (type) {
case DM_TYPE_REQUEST_BASED:
+ dm_init_normal_md_queue(md);
r = dm_old_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based mapped device");
@@ -2034,21 +2166,24 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
- /*
- * DM handles splitting bios as needed. Free the bio_split bioset
- * since it won't be used (saves 1 process per bio-based DM device).
- */
- bioset_free(md->queue->bio_split);
- md->queue->bio_split = NULL;
-
- if (type == DM_TYPE_DAX_BIO_BASED)
- queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
+ break;
+ case DM_TYPE_NVME_BIO_BASED:
+ dm_init_normal_md_queue(md);
+ blk_queue_make_request(md->queue, dm_make_request_nvme);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
break;
}
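+
+	/*
+	 * Queue registration was deferred in alloc_dev() (via
+	 * add_disk_no_queue_reg) so that the limits could be set up
+	 * before the queue is registered; do it now.
+	 */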
+ r = dm_calculate_queue_limits(t, &limits);
+ if (r) {
+ DMERR("Cannot calculate initial queue limits");
+ return r;
+ }
+ dm_table_set_restrictions(t, md->queue, &limits);
+ blk_register_queue(md->disk);
+
return 0;
}
@@ -2113,7 +2248,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
- struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -2124,7 +2258,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(q);
+ blk_set_queue_dying(md->queue);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
@@ -2735,11 +2869,12 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_io_data_size)
+ unsigned integrity, unsigned per_io_data_size,
+ unsigned min_pool_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
unsigned int pool_size = 0;
- unsigned int front_pad;
+ unsigned int front_pad, io_front_pad;
if (!pools)
return NULL;
@@ -2747,16 +2882,19 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- pool_size = dm_get_reserved_bio_based_ios();
+ case DM_TYPE_NVME_BIO_BASED:
+ pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
-
- pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
- if (!pools->io_pool)
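+		/*
+		 * Pad the per-io bioset so that each bio it allocates is
+		 * embedded in a struct dm_io as io->tio.clone.
+		 */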
+ io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
+ pools->io_bs = bioset_create(pool_size, io_front_pad, 0);
+ if (!pools->io_bs)
+ goto out;
+ if (integrity && bioset_integrity_create(pools->io_bs, pool_size))
goto out;
break;
case DM_TYPE_REQUEST_BASED:
case DM_TYPE_MQ_REQUEST_BASED:
- pool_size = dm_get_reserved_rq_based_ios();
+ pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
@@ -2764,7 +2902,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
BUG();
}
- pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
+ pools->bs = bioset_create(pool_size, front_pad, 0);
if (!pools->bs)
goto out;
@@ -2784,10 +2922,10 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
return;
- mempool_destroy(pools->io_pool);
-
if (pools->bs)
bioset_free(pools->bs);
+ if (pools->io_bs)
+ bioset_free(pools->io_bs);
kfree(pools);
}
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 36399bb875dd..114a81b27c37 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -49,7 +49,6 @@ struct dm_md_mempools;
/*-----------------------------------------------------------------
* Internal table functions.
*---------------------------------------------------------------*/
-void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
@@ -206,7 +205,8 @@ void dm_kcopyd_exit(void);
* Mempool operations
*/
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_bio_data_size);
+ unsigned integrity, unsigned per_bio_data_size,
+ unsigned min_pool_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e4dee0ec2de..0081ace39a64 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -711,7 +711,7 @@ static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
return NULL;
}
-static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
@@ -721,6 +721,7 @@ static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
return NULL;
}
+EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
static struct md_personality *find_pers(int level, char *clevel)
{
@@ -5560,11 +5561,6 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
- /*
- * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
- * up mddev->thread. It is important to initialize critical
- * resources for mddev->thread BEFORE calling pers->run().
- */
err = pers->run(mddev);
if (err)
pr_warn("md: pers->run() failed ...\n");
@@ -5678,6 +5674,9 @@ static int do_md_run(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_allow_write(mddev);
+	/* run start-up tasks that require md_thread */
+ md_start(mddev);
+
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
@@ -5689,6 +5688,21 @@ out:
return err;
}
+int md_start(struct mddev *mddev)
+{
+ int ret = 0;
+
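+	/*
+	 * MD_RECOVERY_WAIT keeps md_do_sync() from proceeding until
+	 * pers->start() has completed.
+	 */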
+ if (mddev->pers->start) {
+ set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ ret = mddev->pers->start(mddev);
+ clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->sync_thread);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(md_start);
+
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
@@ -6997,7 +7011,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
return -ENODEV;
rcu_read_lock();
- rdev = find_rdev_rcu(mddev, dev);
+ rdev = md_find_rdev_rcu(mddev, dev);
if (!rdev)
err = -ENODEV;
else {
@@ -7871,10 +7885,10 @@ static int md_seq_open(struct inode *inode, struct file *file)
}
static int md_unloading;
-static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
- int mask;
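+	/* __poll_t is the sparse "bitwise" type for poll event masks */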
+ __poll_t mask;
if (md_unloading)
return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
@@ -8169,7 +8183,8 @@ void md_do_sync(struct md_thread *thread)
int ret;
/* just in case thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
return;
if (mddev->ro) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7d6bcf0eba0c..58cd20a5e85e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -485,6 +485,7 @@ enum recovery_flags {
MD_RECOVERY_RESHAPE, /* A reshape is happening */
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+ MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
};
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -523,7 +524,13 @@ struct md_personality
struct list_head list;
struct module *owner;
bool (*make_request)(struct mddev *mddev, struct bio *bio);
+	/*
+	 * start-up work that does NOT require md_thread. Tasks that
+	 * require md_thread should go into start()
+	 */
int (*run)(struct mddev *mddev);
+	/* start-up work that requires md threads */
+ int (*start)(struct mddev *mddev);
void (*free)(struct mddev *mddev, void *priv);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
@@ -687,6 +694,7 @@ extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
+extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
@@ -702,6 +710,7 @@ extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6df398e3a008..b2eae332e1a2 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -815,6 +815,17 @@ static void flush_pending_writes(struct r1conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false-positive. Silence the warning by resetting
+ * thread state
+ */
+ __set_current_state(TASK_RUNNING);
blk_start_plug(&plug);
flush_bio_list(conf, bio);
blk_finish_plug(&plug);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c131835cf008..99c9207899a7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -900,6 +900,18 @@ static void flush_pending_writes(struct r10conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false-positive. Silence the warning by resetting
+ * thread state
+ */
+ __set_current_state(TASK_RUNNING);
+
blk_start_plug(&plug);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 39f31f07ffe9..3c65f52b68f5 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1111,9 +1111,6 @@ void r5l_write_stripe_run(struct r5l_log *log)
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
- if (!log)
- return -ENODEV;
-
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
/*
* in write through (journal only)
@@ -1592,8 +1589,6 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
void r5l_quiesce(struct r5l_log *log, int quiesce)
{
struct mddev *mddev;
- if (!log)
- return;
if (quiesce) {
/* make sure r5l_write_super_and_discard_space exits */
@@ -2448,7 +2443,6 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
raid5_release_stripe(sh);
}
- md_wakeup_thread(conf->mddev->thread);
/* reuse conf->wait_for_quiescent in recovery */
wait_event(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0);
@@ -2491,10 +2485,10 @@ static int r5l_recovery_log(struct r5l_log *log)
ctx->seq += 10000;
if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
- pr_debug("md/raid:%s: starting from clean shutdown\n",
+ pr_info("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
else
- pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
+ pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx->data_only_stripes,
ctx->data_parity_stripes);
@@ -3036,6 +3030,23 @@ ioerr:
return ret;
}
+int r5l_start(struct r5l_log *log)
+{
+ int ret;
+
+ if (!log)
+ return 0;
+
+ ret = r5l_load_log(log);
+ if (ret) {
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ r5l_exit_log(conf);
+ }
+ return ret;
+}
+
void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
@@ -3138,13 +3149,9 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
rcu_assign_pointer(conf->log, log);
- if (r5l_load_log(log))
- goto error;
-
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
-error:
rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 284578b0a349..0c76bcedfc1c 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -32,6 +32,7 @@ extern struct md_sysfs_entry r5c_journal_mode;
extern void r5c_update_on_rdev_error(struct mddev *mddev,
struct md_rdev *rdev);
extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+extern int r5l_start(struct r5l_log *log);
extern struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
@@ -42,6 +43,7 @@ extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
extern void ppl_write_stripe_run(struct r5conf *conf);
extern void ppl_stripe_write_finished(struct stripe_head *sh);
extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+extern void ppl_quiesce(struct r5conf *conf, int quiesce);
static inline bool raid5_has_ppl(struct r5conf *conf)
{
@@ -87,6 +89,34 @@ static inline void log_write_stripe_run(struct r5conf *conf)
ppl_write_stripe_run(conf);
}
+static inline void log_flush_stripe_to_raid(struct r5conf *conf)
+{
+ if (conf->log)
+ r5l_flush_stripe_to_raid(conf->log);
+ else if (raid5_has_ppl(conf))
+ ppl_write_stripe_run(conf);
+}
+
+static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
+{
+ int ret = -ENODEV;
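+	/* -ENODEV when there is neither a journal nor PPL, matching what
+	 * r5l_handle_flush_request() used to return for a NULL log */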
+
+ if (conf->log)
+ ret = r5l_handle_flush_request(conf->log, bio);
+ else if (raid5_has_ppl(conf))
+ ret = 0;
+
+ return ret;
+}
+
+static inline void log_quiesce(struct r5conf *conf, int quiesce)
+{
+ if (conf->log)
+ r5l_quiesce(conf->log, quiesce);
+ else if (raid5_has_ppl(conf))
+ ppl_quiesce(conf, quiesce);
+}
+
static inline void log_exit(struct r5conf *conf)
{
if (conf->log)
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 628c0bf7b9fd..2764c2290062 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -85,6 +85,9 @@
* (for a single member disk). New io_units are added to the end of the list
* and the first io_unit is submitted, if it is not submitted already.
* The current io_unit accepting new stripes is always at the end of the list.
+ *
+ * If a write-back cache is enabled on any of the disks in the array, its data
+ * must be flushed before the next io_unit is submitted.
*/
#define PPL_SPACE_SIZE (128 * 1024)
@@ -104,6 +107,7 @@ struct ppl_conf {
struct kmem_cache *io_kc;
mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *flush_bs;
/* used only for recovery */
int recovered_entries;
@@ -128,6 +132,8 @@ struct ppl_log {
sector_t next_io_sector;
unsigned int entry_space;
bool use_multippl;
+ bool wb_cache_on;
+ unsigned long disk_flush_bitmap;
};
#define PPL_IO_INLINE_BVECS 32
@@ -145,6 +151,7 @@ struct ppl_io_unit {
struct list_head stripe_list; /* stripes added to the io_unit */
atomic_t pending_stripes; /* how many stripes not written to raid */
+ atomic_t pending_flushes; /* how many disk flushes are in progress */
bool submitted; /* true if write to log started */
@@ -249,6 +256,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
+ atomic_set(&io->pending_flushes, 0);
bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
pplhdr = page_address(io->header_page);
@@ -475,7 +483,18 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (log->use_multippl)
log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
+ WARN_ON(log->disk_flush_bitmap != 0);
+
list_for_each_entry(sh, &io->stripe_list, log_list) {
+ for (i = 0; i < sh->disks; i++) {
+ struct r5dev *dev = &sh->dev[i];
+
+ if ((ppl_conf->child_logs[i].wb_cache_on) &&
+ (test_bit(R5_Wantwrite, &dev->flags))) {
+ set_bit(i, &log->disk_flush_bitmap);
+ }
+ }
+
/* entries for full stripe writes have no partial parity */
if (test_bit(STRIPE_FULL_WRITE, &sh->state))
continue;
@@ -540,6 +559,7 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
struct ppl_log *log = io->log;
struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
unsigned long flags;
pr_debug("%s: seq: %llu\n", __func__, io->seq);
@@ -565,6 +585,112 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
spin_unlock(&ppl_conf->no_mem_stripes_lock);
local_irq_restore(flags);
+
+ wake_up(&conf->wait_for_quiescent);
+}
+
+static void ppl_flush_endio(struct bio *bio)
+{
+ struct ppl_io_unit *io = bio->bi_private;
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ char b[BDEVNAME_SIZE];
+
+ pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+
+ if (bio->bi_status) {
+ struct md_rdev *rdev;
+
+ rcu_read_lock();
+ rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
+ if (rdev)
+ md_error(rdev->mddev, rdev);
+ rcu_read_unlock();
+ }
+
+ bio_put(bio);
+
+ if (atomic_dec_and_test(&io->pending_flushes)) {
+ ppl_io_unit_finished(io);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+static void ppl_do_flush(struct ppl_io_unit *io)
+{
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ int raid_disks = conf->raid_disks;
+ int flushed_disks = 0;
+ int i;
+
+ atomic_set(&io->pending_flushes, raid_disks);
+
+ for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
+ struct md_rdev *rdev;
+ struct block_device *bdev = NULL;
+
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ bdev = rdev->bdev;
+ rcu_read_unlock();
+
+ if (bdev) {
+ struct bio *bio;
+ char b[BDEVNAME_SIZE];
+
+ bio = bio_alloc_bioset(GFP_NOIO, 0, ppl_conf->flush_bs);
+ bio_set_dev(bio, bdev);
+ bio->bi_private = io;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_end_io = ppl_flush_endio;
+
+ pr_debug("%s: dev: %s\n", __func__,
+ bio_devname(bio, b));
+
+ submit_bio(bio);
+ flushed_disks++;
+ }
+ }
+
+ log->disk_flush_bitmap = 0;
+
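+	/*
+	 * pending_flushes was set to raid_disks; drop the references held
+	 * for disks that got no flush bio, so the io_unit completes once
+	 * all submitted flushes have finished.
+	 */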
+	for (i = flushed_disks; i < raid_disks; i++) {
+ if (atomic_dec_and_test(&io->pending_flushes))
+ ppl_io_unit_finished(io);
+ }
+}
+
+static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
+ struct ppl_log *log)
+{
+ struct ppl_io_unit *io;
+
+ io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
+ log_sibling);
+
+ return !io || !io->submitted;
+}
+
+void ppl_quiesce(struct r5conf *conf, int quiesce)
+{
+ struct ppl_conf *ppl_conf = conf->log_private;
+ int i;
+
+ if (quiesce) {
+ for (i = 0; i < ppl_conf->count; i++) {
+ struct ppl_log *log = &ppl_conf->child_logs[i];
+
+ spin_lock_irq(&log->io_list_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ ppl_no_io_unit_submitted(conf, log),
+ log->io_list_lock);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
}
void ppl_stripe_write_finished(struct stripe_head *sh)
@@ -574,8 +700,12 @@ void ppl_stripe_write_finished(struct stripe_head *sh)
io = sh->ppl_io;
sh->ppl_io = NULL;
- if (io && atomic_dec_and_test(&io->pending_stripes))
- ppl_io_unit_finished(io);
+ if (io && atomic_dec_and_test(&io->pending_stripes)) {
+ if (io->log->disk_flush_bitmap)
+ ppl_do_flush(io);
+ else
+ ppl_io_unit_finished(io);
+ }
}
static void ppl_xor(int size, struct page *page1, struct page *page2)
@@ -1108,6 +1238,8 @@ static void __ppl_exit_log(struct ppl_conf *ppl_conf)
if (ppl_conf->bs)
bioset_free(ppl_conf->bs);
+ if (ppl_conf->flush_bs)
+ bioset_free(ppl_conf->flush_bs);
mempool_destroy(ppl_conf->io_pool);
kmem_cache_destroy(ppl_conf->io_kc);
@@ -1173,6 +1305,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
+ struct request_queue *q;
+
if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
PPL_HEADER_SIZE) * 2) {
log->use_multippl = true;
@@ -1185,6 +1319,10 @@ static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
PPL_HEADER_SIZE;
}
log->next_io_sector = rdev->ppl.sector;
+
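+	/* remember members with a volatile write-back cache; their data
+	 * must be flushed before the next io_unit is submitted */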
+ q = bdev_get_queue(rdev->bdev);
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ log->wb_cache_on = true;
}
int ppl_init_log(struct r5conf *conf)
@@ -1192,8 +1330,8 @@ int ppl_init_log(struct r5conf *conf)
struct ppl_conf *ppl_conf;
struct mddev *mddev = conf->mddev;
int ret = 0;
+ int max_disks;
int i;
- bool need_cache_flush = false;
pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
mdname(conf->mddev));
@@ -1219,6 +1357,14 @@ int ppl_init_log(struct r5conf *conf)
return -EINVAL;
}
+ max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
+ BITS_PER_BYTE;
+ if (conf->raid_disks > max_disks) {
+ pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
+ mdname(mddev), max_disks);
+ return -EINVAL;
+ }
+
ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
if (!ppl_conf)
return -ENOMEM;
@@ -1244,6 +1390,12 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
+ ppl_conf->flush_bs = bioset_create(conf->raid_disks, 0, 0);
+ if (!ppl_conf->flush_bs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ppl_conf->count = conf->raid_disks;
ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
GFP_KERNEL);
@@ -1275,23 +1427,14 @@ int ppl_init_log(struct r5conf *conf)
log->rdev = rdev;
if (rdev) {
- struct request_queue *q;
-
ret = ppl_validate_rdev(rdev);
if (ret)
goto err;
- q = bdev_get_queue(rdev->bdev);
- if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- need_cache_flush = true;
ppl_init_child_log(log, rdev);
}
}
- if (need_cache_flush)
- pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
- mdname(mddev));
-
/* load and possibly recover the logs from the member disks */
ret = ppl_load(ppl_conf);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 98ce4272ace9..50d01144b805 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5563,7 +5563,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
- int ret = r5l_handle_flush_request(conf->log, bi);
+ int ret = log_handle_flush_request(conf, bi);
if (ret == 0)
return true;
@@ -6168,7 +6168,7 @@ static int handle_active_stripes(struct r5conf *conf, int group,
break;
if (i == NR_STRIPE_HASH_LOCKS) {
spin_unlock_irq(&conf->device_lock);
- r5l_flush_stripe_to_raid(conf->log);
+ log_flush_stripe_to_raid(conf);
spin_lock_irq(&conf->device_lock);
return batch_size;
}
@@ -8060,7 +8060,7 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce)
wake_up(&conf->wait_for_overlap);
unlock_all_device_hash_locks_irq(conf);
}
- r5l_quiesce(conf->log, quiesce);
+ log_quiesce(conf, quiesce);
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
@@ -8364,6 +8364,13 @@ static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
return err;
}
+static int raid5_start(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ return r5l_start(conf->log);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -8371,6 +8378,7 @@ static struct md_personality raid6_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8395,6 +8403,7 @@ static struct md_personality raid5_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8420,6 +8429,7 @@ static struct md_personality raid4_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 3dba3aa34a43..9d6c496f756c 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -43,13 +43,13 @@ static inline struct cec_devnode *cec_devnode_data(struct file *filp)
/* CEC file operations */
-static unsigned int cec_poll(struct file *filp,
+static __poll_t cec_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct cec_devnode *devnode = cec_devnode_data(filp);
struct cec_fh *fh = filp->private_data;
struct cec_adapter *adap = fh->adap;
- unsigned int res = 0;
+ __poll_t res = 0;
if (!devnode->registered)
return POLLERR | POLLHUP;
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 8c87d6837c49..8ee3eebef4db 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -320,13 +320,13 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
return res;
}
-static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t __fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
struct videobuf_queue *q;
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
DEB_EE("file:%p, poll:%p\n", file, wait);
@@ -359,10 +359,10 @@ static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wai
return res;
}
-static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
- unsigned int res;
+ __poll_t res;
mutex_lock(vdev->lock);
res = __fops_poll(file, wait);
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 1a8677ade391..0c0878bcf251 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -374,7 +374,7 @@ exit:
return rc;
}
-static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
+static __poll_t smsdvb_stats_poll(struct file *file, poll_table *wait)
{
struct smsdvb_debugfs *debug_data = file->private_data;
int rc;
@@ -384,12 +384,9 @@ static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
poll_wait(file, &debug_data->stats_queue, wait);
rc = smsdvb_stats_wait_read(debug_data);
- if (rc > 0)
- rc = POLLIN | POLLRDNORM;
-
kref_put(&debug_data->refcount, smsdvb_debugfs_data_release);
- return rc;
+ return rc > 0 ? POLLIN | POLLRDNORM : 0;
}
static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 3ddd44e1ee77..3fe0eb740a6d 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1066,10 +1066,10 @@ static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}
-static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
return POLLERR;
@@ -1160,11 +1160,11 @@ static long dvb_dvr_ioctl(struct file *file,
return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}
-static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk("%s\n", __func__);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index d48b61eb01f4..3f6c8bd8f869 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1782,11 +1782,11 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
*
* return: Standard poll mask.
*/
-static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int slot;
int result = 0;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2afaa8226342..48e16fd003a7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2470,7 +2470,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
}
-static unsigned int dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index edbb30fdd9d9..fb8a1d2ffd24 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -207,7 +207,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
return err;
}
-static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
+static __poll_t fdtv_ca_io_poll(struct file *file, poll_table *wait)
{
return POLLIN;
}
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 423248f577b6..3049b1f505e5 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -99,7 +99,7 @@ static ssize_t media_write(struct file *filp, const char __user *buf,
return devnode->fops->write(filp, buf, sz, off);
}
-static unsigned int media_poll(struct file *filp,
+static __poll_t media_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct media_devnode *devnode = media_devnode_data(filp);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index b366a7e1d976..c988669e22ff 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2955,13 +2955,13 @@ static ssize_t bttv_read(struct file *file, char __user *data,
return retval;
}
-static unsigned int bttv_poll(struct file *file, poll_table *wait)
+static __poll_t bttv_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv_buffer *buf;
enum v4l2_field field;
- unsigned int rc = 0;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t rc = 0;
+ __poll_t req_events = poll_requested_events(wait);
if (v4l2_event_pending(&fh->fh))
rc = POLLPRI;
@@ -3329,13 +3329,13 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa6588_command cmd;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 4f9c2395941b..2b0abd5bbf64 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -602,14 +602,14 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
}
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx18_open_id *id = file2id(filp);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) &&
@@ -629,7 +629,7 @@ unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(id->type == CX18_ENC_STREAM_TYPE_YUV)) {
- int videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
+ __poll_t videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
if (v4l2_event_pending(&id->fh))
res |= POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 37ef34e866cb..5b44d30efd8f 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -23,7 +23,7 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t cx18_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t *pos);
int cx18_v4l2_close(struct file *filp);
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
int cx18_start_capture(struct cx18_open_id *id);
void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index f4bd4908acdd..09a25d6c2cd1 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -732,13 +732,13 @@ static ssize_t ts_read(struct file *file, __user char *buf,
return (count && (left == count)) ? -EAGAIN : (count - left);
}
-static unsigned int ts_poll(struct file *file, poll_table *wait)
+static __poll_t ts_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct ddb_output *output = dvbdev->priv;
struct ddb_input *input = output->port->input[0];
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &input->dma->wq, wait);
poll_wait(file, &output->dma->wq, wait);
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index c9bd018e53de..4aa773507201 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -730,12 +730,12 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
return res;
}
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
- int res = 0;
+ __poll_t res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
@@ -764,14 +764,14 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
return res;
}
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.h b/drivers/media/pci/ivtv/ivtv-fileops.h
index 5e08800772ca..e0029b2b8d09 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.h
+++ b/drivers/media/pci/ivtv/ivtv-fileops.h
@@ -28,8 +28,8 @@ ssize_t ivtv_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t * pos);
int ivtv_v4l2_close(struct file *filp);
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
int ivtv_start_capture(struct ivtv_open_id *id);
void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end);
int ivtv_start_decoding(struct ivtv_open_id *id, int speed);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 23999a8cef37..f74b08635082 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1423,9 +1423,9 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
}
-static unsigned int meye_poll(struct file *file, poll_table *wait)
+static __poll_t meye_poll(struct file *file, poll_table *wait)
{
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 82d2a24644e4..0ceaa3473cf2 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1227,11 +1227,11 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
- unsigned int rc = v4l2_ctrl_poll(file, wait);
+ __poll_t rc = v4l2_ctrl_poll(file, wait);
cmd.instance = file;
cmd.event_list = wait;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index f21c245a54f7..e7b31a5b14fd 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -909,13 +909,13 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa7164_encoder_fh *fh =
(struct saa7164_encoder_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = v4l2_ctrl_poll(file, wait);
+ __poll_t mask = v4l2_ctrl_poll(file, wait);
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 9255d7d23947..6f97c8f2e00d 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -614,11 +614,11 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = 0;
+ __poll_t mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 2aa4ba675194..4d10e2f979d2 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -937,11 +937,11 @@ static int dvb_video_get_event (struct av7110 *av7110, struct video_event *event
* DVB device file operations
******************************************************************************/
-static unsigned int dvb_video_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_video_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
@@ -989,11 +989,11 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
}
-static unsigned int dvb_audio_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_audio_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index 1fe49171d823..96ca227cf51b 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -223,13 +223,13 @@ static int dvb_ca_open(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
+static __poll_t dvb_ca_poll (struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
struct dvb_ringbuffer *rbuf = &av7110->ci_rbuffer;
struct dvb_ringbuffer *wbuf = &av7110->ci_wbuffer;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(8, "av7110:%p\n",av7110);
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index d07840072337..b6a6c4f171d0 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2501,13 +2501,13 @@ static int zoran_s_jpegcomp(struct file *file, void *__fh,
return res;
}
-static unsigned int
+static __poll_t
zoran_poll (struct file *file,
poll_table *wait)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
- int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
int frame;
unsigned long flags;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 7b3f6f8e3dc8..cf65b39807fe 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -730,7 +730,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll: It is used for select/poll system call
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 2a2994ef15d5..b2dc524112f7 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -707,12 +707,12 @@ static int gsc_m2m_release(struct file *file)
return 0;
}
-static unsigned int gsc_m2m_poll(struct file *file,
+static __poll_t gsc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
- unsigned int ret;
+ __poll_t ret;
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index dba21215dc84..de285a269390 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1263,13 +1263,13 @@ static ssize_t viu_read(struct file *file, char __user *data, size_t count,
return 0;
}
-static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t viu_poll(struct file *file, struct poll_table_struct *wait)
{
struct viu_fh *fh = file->private_data;
struct videobuf_queue *q = &fh->vb_vidq;
struct viu_dev *dev = fh->dev;
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index c8a12493f395..945ef1e2ccc7 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -950,11 +950,11 @@ static int deinterlace_release(struct file *file)
return 0;
}
-static unsigned int deinterlace_poll(struct file *file,
+static __poll_t deinterlace_poll(struct file *file,
struct poll_table_struct *wait)
{
struct deinterlace_ctx *ctx = file->private_data;
- int ret;
+ __poll_t ret;
deinterlace_lock(ctx);
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 4a2b1afa19c4..5a8eff60e95f 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -838,12 +838,12 @@ static int emmaprp_release(struct file *file)
return 0;
}
-static unsigned int emmaprp_poll(struct file *file,
+static __poll_t emmaprp_poll(struct file *file,
struct poll_table_struct *wait)
{
struct emmaprp_dev *pcdev = video_drvdata(file);
struct emmaprp_ctx *ctx = file->private_data;
- unsigned int res;
+ __poll_t res;
mutex_lock(&pcdev->dev_mutex);
res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 6f1b0c799e58..abb14ee20538 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -839,7 +839,7 @@ static void omap_vout_buffer_release(struct videobuf_queue *q,
/*
* File operations
*/
-static unsigned int omap_vout_poll(struct file *file,
+static __poll_t omap_vout_poll(struct file *file,
struct poll_table_struct *wait)
{
struct omap_vout_device *vout = file->private_data;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 218e6d7ae93a..a751c89a3ea8 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1383,11 +1383,11 @@ static int isp_video_release(struct file *file)
return 0;
}
-static unsigned int isp_video_poll(struct file *file, poll_table *wait)
+static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
struct isp_video *video = video_drvdata(file);
- int ret;
+ __poll_t ret;
mutex_lock(&video->queue_lock);
ret = vb2_poll(&vfh->queue, file, wait);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 25c7a7d42292..437395a61065 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -590,12 +590,12 @@ static int s3c_camif_close(struct file *file)
return ret;
}
-static unsigned int s3c_camif_poll(struct file *file,
+static __poll_t s3c_camif_poll(struct file *file,
struct poll_table_struct *wait)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
- int ret;
+ __poll_t ret;
mutex_lock(&camif->lock);
if (vp->owner && vp->owner != file->private_data)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index bc68dbbcaec1..fe94bd6b705e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -988,14 +988,14 @@ static int s5p_mfc_release(struct file *file)
}
/* Poll */
-static unsigned int s5p_mfc_poll(struct file *file,
+static __poll_t s5p_mfc_poll(struct file *file,
struct poll_table_struct *wait)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
struct s5p_mfc_dev *dev = ctx->dev;
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
mutex_lock(&dev->mfc_mutex);
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index dedc1b024f6f..976ea0bb5b6c 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1016,7 +1016,7 @@ static int sh_veu_release(struct file *file)
return 0;
}
-static unsigned int sh_veu_poll(struct file *file,
+static __poll_t sh_veu_poll(struct file *file,
struct poll_table_struct *wait)
{
struct sh_veu_file *veu_file = file->private_data;
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 36762ec954e7..9b069783e3ed 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1553,7 +1553,7 @@ static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
return ret;
}
-static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
+static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 916ff68b73d4..d964c072832c 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -805,11 +805,11 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
return err;
}
-static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
+static __poll_t soc_camera_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- unsigned res = POLLERR;
+ __poll_t res = POLLERR;
if (icd->streamer != file)
return POLLERR;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 805d4a8fc17e..f77be9302120 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -764,7 +764,7 @@ out_unlock:
}
-static unsigned int viacam_poll(struct file *filp, struct poll_table_struct *pt)
+static __poll_t viacam_poll(struct file *filp, struct poll_table_struct *pt)
{
struct via_camera *cam = video_drvdata(filp);
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 5f316a5e38db..c80239592088 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -416,7 +416,7 @@ static ssize_t vivid_radio_write(struct file *file, const char __user *buf,
return vivid_radio_tx_write(file, buf, size, offset);
}
-static unsigned int vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c
index 47c36c26096b..71f3ebb7aecf 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.c
+++ b/drivers/media/platform/vivid/vivid-radio-rx.c
@@ -141,7 +141,7 @@ retry:
return i;
}
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.h b/drivers/media/platform/vivid/vivid-radio-rx.h
index 1077d8f061eb..2b33edb60942 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.h
+++ b/drivers/media/platform/vivid/vivid-radio-rx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_RX_H_
ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c
index 0e8025b7b4dd..f0917f4e7d8c 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.c
+++ b/drivers/media/platform/vivid/vivid-radio-tx.c
@@ -105,7 +105,7 @@ retry:
return i;
}
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.h b/drivers/media/platform/vivid/vivid-radio-tx.h
index 7f8ff7547119..3c3343d70cbc 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.h
+++ b/drivers/media/platform/vivid/vivid-radio-tx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_TX_H_
ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 7575e5370a49..dba611003a00 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -481,11 +481,11 @@ static int cadet_release(struct file *file)
return 0;
}
-static unsigned int cadet_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t cadet_poll(struct file *file, struct poll_table_struct *wait)
{
struct cadet *dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
poll_wait(file, &dev->read_queue, wait);
if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) {
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 540ac887a63c..49293dd707b9 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1153,12 +1153,12 @@ static ssize_t si476x_radio_fops_read(struct file *file, char __user *buf,
return rval;
}
-static unsigned int si476x_radio_fops_poll(struct file *file,
+static __poll_t si476x_radio_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si476x_radio *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- unsigned int err = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t err = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
if (atomic_read(&radio->core->is_alive))
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 3cbdc085c65d..f92b0f9241a9 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1089,7 +1089,7 @@ out:
return r;
}
-static unsigned int wl1273_fm_fops_poll(struct file *file,
+static __poll_t wl1273_fm_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index c89a7d5b8c55..68fe9e5c7a70 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -507,12 +507,12 @@ done:
/*
* si470x_fops_poll - poll RDS data
*/
-static unsigned int si470x_fops_poll(struct file *file,
+static __poll_t si470x_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si470x_device *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- int retval = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t retval = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
/* switch on rds reception */
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index fc5a7abc83d2..fd603c1b96bb 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -102,7 +102,7 @@ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
return sizeof(rds);
}
-static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
{
int ret;
struct fmdev *fmdev;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index e16d1138ca48..aab53641838b 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -272,10 +272,10 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(lirc_dev_fop_close);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
+__poll_t lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
struct lirc_dev *d = file->private_data;
- unsigned int ret;
+ __poll_t ret;
if (!d->attached)
return POLLHUP | POLLERR;
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
index 81f72c0b561f..ab238ac8bfc0 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/media/usb/cpia2/cpia2.h
@@ -444,7 +444,7 @@ int cpia2_allocate_buffers(struct camera_data *cam);
void cpia2_free_buffers(struct camera_data *cam);
long cpia2_read(struct camera_data *cam,
char __user *buf, unsigned long count, int noblock);
-unsigned int cpia2_poll(struct camera_data *cam,
+__poll_t cpia2_poll(struct camera_data *cam,
struct file *filp, poll_table *wait);
int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
void cpia2_set_property_flip(struct camera_data *cam, int prop_val);
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index 0efba0da0a45..e7524920c618 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -2370,10 +2370,10 @@ long cpia2_read(struct camera_data *cam,
* cpia2_poll
*
*****************************************************************************/
-unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
+__poll_t cpia2_poll(struct camera_data *cam, struct file *filp,
poll_table *wait)
{
- unsigned int status = v4l2_ctrl_poll(filp, wait);
+ __poll_t status = v4l2_ctrl_poll(filp, wait);
if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) &&
!cam->streaming) {
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 3dedd83f0b19..74c97565ccc6 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -169,10 +169,10 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
* cpia2_v4l_poll
*
*****************************************************************************/
-static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
{
struct camera_data *cam = video_drvdata(filp);
- unsigned int res;
+ __poll_t res;
mutex_lock(&cam->v4l2_lock);
res = cpia2_poll(cam, filp, wait);
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index d538fa407742..103e3299b77f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1812,13 +1812,13 @@ static ssize_t mpeg_read(struct file *file, char __user *data,
file->f_flags & O_NONBLOCK);
}
-static unsigned int mpeg_poll(struct file *file,
+static __poll_t mpeg_poll(struct file *file,
struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = file->private_data;
struct cx231xx *dev = fh->dev;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res |= POLLPRI;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 226059fc672b..d7b2e694bbb9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -2006,12 +2006,12 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
* cx231xx_v4l2_poll()
* will allocate buffers when called for the first time
*/
-static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
+static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
- unsigned res = 0;
+ __poll_t res = 0;
int rc;
rc = check_dev(dev);
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 961343873fd0..b72d02e225fd 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1862,11 +1862,11 @@ out:
return ret;
}
-static unsigned int dev_poll(struct file *file, poll_table *wait)
+static __poll_t dev_poll(struct file *file, poll_table *wait)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- int ret = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t ret = 0;
PDEBUG(D_FRAM, "poll");
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 7fb036d6a86e..d0d638c2e900 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -521,12 +521,12 @@ err:
return ret;
}
-static unsigned int hdpvr_poll(struct file *filp, poll_table *wait)
+static __poll_t hdpvr_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct hdpvr_buffer *buf = NULL;
struct hdpvr_device *dev = video_drvdata(filp);
- unsigned int mask = v4l2_ctrl_poll(filp, wait);
+ __poll_t mask = v4l2_ctrl_poll(filp, wait);
if (!(req_events & (POLLIN | POLLRDNORM)))
return mask;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 4320bda9352d..11cdfe356801 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1190,9 +1190,9 @@ static ssize_t pvr2_v4l2_read(struct file *file,
}
-static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t pvr2_v4l2_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct pvr2_v4l2_fh *fh = file->private_data;
int ret;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c0bba773db25..cba0916d33e5 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -721,10 +721,10 @@ static ssize_t v4l_stk_read(struct file *fp, char __user *buf,
return ret;
}
-static unsigned int v4l_stk_poll(struct file *fp, poll_table *wait)
+static __poll_t v4l_stk_poll(struct file *fp, poll_table *wait)
{
struct stk_camera *dev = video_drvdata(fp);
- unsigned res = v4l2_ctrl_poll(fp, wait);
+ __poll_t res = v4l2_ctrl_poll(fp, wait);
poll_wait(fp, &dev->wait_frame, wait);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 9fa25de6b5a9..317bf5a3933e 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1423,13 +1423,13 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
return 0;
}
-static unsigned int
+static __poll_t
__tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct tm6000_fh *fh = file->private_data;
struct tm6000_buffer *buf;
- int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
@@ -1457,11 +1457,11 @@ __tm6000_poll(struct file *file, struct poll_table_struct *wait)
return res;
}
-static unsigned int tm6000_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
struct tm6000_fh *fh = file->private_data;
struct tm6000_core *dev = fh->dev;
- unsigned int res;
+ __poll_t res;
mutex_lock(&dev->lock);
res = __tm6000_poll(file, wait);
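
[note] tm6000 follows the common kernel convention of a double-underscored helper that expects the lock to be held, wrapped by a public entry point that takes and drops it; only the types change in this hunk, the locking shape stays. A generic sketch of the idiom (names are illustrative, not from the driver):

	/* must be called with dev->lock held */
	static __poll_t __example_poll(struct file *file, poll_table *wait)
	{
		/* inspect driver state and build the event mask here */
		return 0;
	}

	static __poll_t example_poll(struct file *file, poll_table *wait)
	{
		struct example_core *dev = file->private_data;	/* hypothetical */
		__poll_t res;

		mutex_lock(&dev->lock);
		res = __example_poll(file, wait);
		mutex_unlock(&dev->lock);
		return res;
	}
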
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index c8d78b2f3de4..692c463f14f7 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -340,10 +340,10 @@ unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
}
#endif
-unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
- unsigned int ret;
+ __poll_t ret;
mutex_lock(&queue->mutex);
ret = vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 3e7e283a44a8..381f614b2f4c 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1284,36 +1284,30 @@ struct uvc_xu_control_mapping32 {
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_mapping32 *p = (void *)kp;
+ compat_caddr_t info;
+ u32 count;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), menu_info)) ||
- __get_user(kp->menu_count, &up->menu_count))
+ if (copy_from_user(p, up, sizeof(*p)))
return -EFAULT;
- memset(kp->reserved, 0, sizeof(kp->reserved));
-
- if (kp->menu_count == 0) {
- kp->menu_info = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->menu_info))
- return -EFAULT;
- kp->menu_info = compat_ptr(p);
+ count = p->menu_count;
+ info = p->menu_info;
+ memset(kp->reserved, 0, sizeof(kp->reserved));
+ kp->menu_info = count ? compat_ptr(info) : NULL;
+ kp->menu_count = count;
return 0;
}
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
- __put_user(kp->menu_count, &up->menu_count))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
+ put_user(kp->menu_count, &up->menu_count))
return -EFAULT;
- if (__clear_user(up->reserved, sizeof(up->reserved)))
+ if (clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
return 0;
@@ -1330,31 +1324,26 @@ struct uvc_xu_control_query32 {
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_query32 v;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), data)))
+ if (copy_from_user(&v, up, sizeof(v)))
return -EFAULT;
- if (kp->size == 0) {
- kp->data = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->data))
- return -EFAULT;
- kp->data = compat_ptr(p);
-
+ *kp = (struct uvc_xu_control_query){
+ .unit = v.unit,
+ .selector = v.selector,
+ .query = v.query,
+ .size = v.size,
+ .data = v.size ? compat_ptr(v.data) : NULL
+ };
return 0;
}
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), data)))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
-
return 0;
}
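
[note] Beyond the __poll_t churn, the two compat32 translators above are rewritten to read the whole 32-bit structure with one copy_from_user() — which performs the access_ok() check internally — and only then fix up the pointer-sized members via compat_ptr(), instead of the old field-by-field __get_user() dance. Copy-then-translate means nothing is interpreted until the copy has succeeded. A stripped-down sketch of the pattern against a hypothetical struct pair:

	struct foo32 {
		__u32		count;
		compat_caddr_t	data;	/* 32-bit user pointer */
	};

	struct foo {
		__u32		count;
		void __user	*data;
	};

	static int get_foo(struct foo *kp, const struct foo32 __user *up)
	{
		struct foo32 v;

		if (copy_from_user(&v, up, sizeof(v)))	/* checks access itself */
			return -EFAULT;

		kp->count = v.count;
		kp->data = v.count ? compat_ptr(v.data) : NULL;
		return 0;
	}
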
@@ -1423,7 +1412,7 @@ static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvc_queue_mmap(&stream->queue, vma);
}
-static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 05398784d1c8..9b44a7cd5ec1 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -678,7 +678,7 @@ extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
extern int uvc_queue_mmap(struct uvc_video_queue *queue,
struct vm_area_struct *vma);
-extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
+extern __poll_t uvc_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
extern unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 1d888661fd03..8b7c19943d46 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1287,12 +1287,12 @@ static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-static unsigned int zr364xx_poll(struct file *file,
+static __poll_t zr364xx_poll(struct file *file,
struct poll_table_struct *wait)
{
struct zr364xx_camera *cam = video_drvdata(file);
struct videobuf_queue *q = &cam->vb_vidq;
- unsigned res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
_DBG("%s\n", __func__);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index cbb2ef43945f..b07657149434 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -3457,7 +3457,7 @@ int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
}
EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c647ba648805..8ad8c1627768 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -331,10 +331,10 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
+static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
- unsigned int res = POLLERR | POLLHUP;
+ __poll_t res = POLLERR | POLLHUP;
if (!vdev->fops->poll)
return DEFAULT_POLLMASK;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index bc580fbe18fa..186156f8952a 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -500,14 +500,14 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
@@ -794,11 +794,11 @@ int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
struct v4l2_fh *fh = file->private_data;
struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
- unsigned int ret;
+ __poll_t ret;
if (m2m_ctx->q_lock)
mutex_lock(m2m_ctx->q_lock);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 43fefa73e0a3..28966fa8c610 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -469,7 +469,7 @@ static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
}
#endif
-static unsigned int subdev_poll(struct file *file, poll_table *wait)
+static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index e87fb13b22dc..9a89d3ae170f 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -1118,13 +1118,13 @@ done:
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
videobuf_queue_lock(q);
if (q->streaming) {
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index a8589d96ef72..0d9f772d6d03 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -2018,10 +2018,10 @@ void vb2_core_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_buffer *vb = NULL;
unsigned long flags;
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 4075314a6989..a49f7eb98c2e 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -671,11 +671,11 @@ void vb2_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = 0;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
@@ -904,12 +904,12 @@ exit:
}
EXPORT_SYMBOL_GPL(vb2_fop_read);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *q = vdev->queue;
struct mutex *lock = q->lock ? q->lock : vdev->lock;
- unsigned res;
+ __poll_t res;
void *fileio;
/*
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index a385a35c7de9..90a66b3f7ae1 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -32,7 +32,6 @@
#include <linux/pm_runtime.h>
#include <linux/platform_data/mtd-nand-omap2.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/mach-types.h>
@@ -1138,6 +1137,112 @@ struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
}
EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
+static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
+ struct gpmc_settings *s,
+ int freq, int latency)
+{
+ struct gpmc_device_timings dev_t;
+ const int t_cer = 15;
+ const int t_avdp = 12;
+ const int t_cez = 20; /* max of t_cez, t_oez */
+ const int t_wpl = 40;
+ const int t_wph = 30;
+ int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
+
+ switch (freq) {
+ case 104:
+ min_gpmc_clk_period = 9600; /* 104 MHz */
+ t_ces = 3;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 3;
+ t_aavdh = 6;
+ t_rdyo = 6;
+ break;
+ case 83:
+ min_gpmc_clk_period = 12000; /* 83 MHz */
+ t_ces = 5;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 9;
+ break;
+ case 66:
+ min_gpmc_clk_period = 15000; /* 66 MHz */
+ t_ces = 6;
+ t_avds = 5;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 11;
+ break;
+ default:
+ min_gpmc_clk_period = 18500; /* 54 MHz */
+ t_ces = 7;
+ t_avds = 7;
+ t_avdh = 7;
+ t_ach = 9;
+ t_aavdh = 7;
+ t_rdyo = 15;
+ break;
+ }
+
+ /* Set synchronous read timings */
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ if (!s->sync_write) {
+ dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+ dev_t.t_aavdh = t_aavdh * 1000;
+ }
+ dev_t.ce_xdelay = true;
+ dev_t.avd_xdelay = true;
+ dev_t.oe_xdelay = true;
+ dev_t.we_xdelay = true;
+ dev_t.clk = min_gpmc_clk_period;
+ dev_t.t_bacc = dev_t.clk;
+ dev_t.t_ces = t_ces * 1000;
+ dev_t.t_avds = t_avds * 1000;
+ dev_t.t_avdh = t_avdh * 1000;
+ dev_t.t_ach = t_ach * 1000;
+ dev_t.cyc_iaa = (latency + 1);
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.cyc_aavdh_oe = 1;
+ dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
+
+ gpmc_calc_timings(t, s, &dev_t);
+}
+
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ int ret;
+ struct gpmc_timings gpmc_t;
+ struct gpmc_settings gpmc_s;
+
+ gpmc_read_settings_dt(dev->of_node, &gpmc_s);
+
+ info->sync_read = gpmc_s.sync_read;
+ info->sync_write = gpmc_s.sync_write;
+ info->burst_len = gpmc_s.burst_len;
+
+ if (!gpmc_s.sync_read && !gpmc_s.sync_write)
+ return 0;
+
+ gpmc_omap_onenand_calc_sync_timings(&gpmc_t, &gpmc_s, freq, latency);
+
+ ret = gpmc_cs_program_settings(cs, &gpmc_s);
+ if (ret < 0)
+ return ret;
+
+ return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
+
int gpmc_get_client_irq(unsigned irq_config)
{
if (!gpmc_irq_domain) {
@@ -1916,41 +2021,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#if IS_ENABLED(CONFIG_MTD_ONENAND)
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- u32 val;
- struct omap_onenand_platform_data *gpmc_onenand_data;
-
- if (of_property_read_u32(child, "reg", &val) < 0) {
- dev_err(&pdev->dev, "%pOF has no 'reg' property\n",
- child);
- return -ENODEV;
- }
-
- gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
- GFP_KERNEL);
- if (!gpmc_onenand_data)
- return -ENOMEM;
-
- gpmc_onenand_data->cs = val;
- gpmc_onenand_data->of_node = child;
- gpmc_onenand_data->dma_channel = -1;
-
- if (!of_property_read_u32(child, "dma-channel", &val))
- gpmc_onenand_data->dma_channel = val;
-
- return gpmc_onenand_init(gpmc_onenand_data);
-}
-#else
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- return 0;
-}
-#endif
-
/**
* gpmc_probe_generic_child - configures the gpmc for a child device
* @pdev: pointer to gpmc platform device
@@ -2053,6 +2123,16 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
+ if (of_node_cmp(child->name, "onenand") == 0) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible OneNAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
if (of_device_is_compatible(child, "ti,omap2-nand")) {
/* NAND specific setup */
val = 8;
@@ -2077,8 +2157,9 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
} else {
ret = of_property_read_u32(child, "bank-width",
&gpmc_s.device_width);
- if (ret < 0) {
- dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n",
+ if (ret < 0 && !gpmc_s.device_width) {
+ dev_err(&pdev->dev,
+ "%pOF has no 'gpmc,device-width' property\n",
child);
goto err;
}
@@ -2188,11 +2269,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
if (!child->name)
continue;
- if (of_node_cmp(child->name, "onenand") == 0)
- ret = gpmc_probe_onenand_child(pdev, child);
- else
- ret = gpmc_probe_generic_child(pdev, child);
-
+ ret = gpmc_probe_generic_child(pdev, child);
if (ret) {
dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
child->name, ret);
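
[note] Two coupled changes land in omap-gpmc.c: OneNAND child nodes are no longer special-cased into an omap_onenand_platform_data via gpmc_probe_onenand_child(), and the new gpmc_omap_onenand_set_timings() lets the OneNAND driver itself ask the GPMC to program a chip-select once it has negotiated a clock and latency. The switch in gpmc_omap_onenand_calc_sync_timings() is effectively a datasheet lookup table per supported burst clock (104/83/66 MHz, defaulting to the 54 MHz figures), with nanosecond values scaled to picoseconds for gpmc_calc_timings(). A hedged sketch of a caller, with hypothetical chip-side helpers and error handling trimmed:

	struct gpmc_onenand_info info;
	int ret;

	/* e.g. 83 MHz sync burst, latency of 4 cycles as a negotiated value */
	ret = gpmc_omap_onenand_set_timings(dev, cs, 83, 4, &info);
	if (ret)
		return ret;

	if (info.sync_read || info.sync_write)
		/* hypothetical helper: put the chip itself into sync mode */
		onenand_enable_sync(chip, info.burst_len);
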
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 7310e32b5991..aa2b0786bbe9 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -45,7 +45,7 @@ config MEMSTICK_R592
config MEMSTICK_REALTEK_PCI
tristate "Realtek PCI-E Memstick Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support Memstick card interface
of Realtek PCI-E card reader
@@ -55,7 +55,7 @@ config MEMSTICK_REALTEK_PCI
config MEMSTICK_REALTEK_USB
tristate "Realtek USB Memstick Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support Memstick card interface
of Realtek RTS5129/39 series USB card reader
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 818fa94354ae..a44b4578ba4d 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -24,7 +24,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memstick.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_ms {
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 2e3cf012ef48..4f64563df7de 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -25,7 +25,7 @@
#include <linux/workqueue.h>
#include <linux/memstick.h>
#include <linux/kthread.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/sched.h>
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7a93400eea2a..51eb1b027963 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -958,7 +958,7 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
@@ -994,7 +994,7 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
@@ -1128,11 +1128,12 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
- pChain->NextChainOffset = next;
- pChain->Address = cpu_to_le32(dma_addr);
+ SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+ pChain->NextChainOffset = next;
+ pChain->Address = cpu_to_le32(dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1147,18 +1148,18 @@ mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
- u32 tmp = dma_addr & 0xFFFFFFFF;
+ SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+ u32 tmp = dma_addr & 0xFFFFFFFF;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
- MPI_SGE_FLAGS_64_BIT_ADDRESSING);
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING);
- pChain->NextChainOffset = next;
+ pChain->NextChainOffset = next;
- pChain->Address.Low = cpu_to_le32(tmp);
- tmp = (u32)(upper_32_bits(dma_addr));
- pChain->Address.High = cpu_to_le32(tmp);
+ pChain->Address.Low = cpu_to_le32(tmp);
+ tmp = (u32)(upper_32_bits(dma_addr));
+ pChain->Address.High = cpu_to_le32(tmp);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1360,7 +1361,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
-return 0;
+ return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2152,7 +2153,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
device_state);
/* put ioc into READY_STATE */
- if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+ if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
printk(MYIOC_s_ERR_FMT
"pci-suspend: IOC msg unit reset failed!\n", ioc->name);
}
@@ -6348,7 +6349,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
u8 page_type = 0, extend_page;
unsigned long timeleft;
unsigned long flags;
- int in_isr;
+ int in_isr;
u8 issue_hard_reset = 0;
u8 retry_count = 0;
@@ -7697,7 +7698,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
break;
}
if (ds)
- strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
+ strlcpy(evStr, ds, EVENT_DESCR_STR_SZ);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
@@ -8092,15 +8093,15 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
static void
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
{
-union loginfo_type {
- u32 loginfo;
- struct {
- u32 subcode:16;
- u32 code:8;
- u32 originator:4;
- u32 bus_type:4;
- }dw;
-};
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
union loginfo_type sas_loginfo;
char *originator_desc = NULL;
char *code_desc = NULL;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7b3b41368931..8d12017b9893 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2481,24 +2481,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
else
karg.host_no = -1;
- /* Reformat the fw_version into a string
- */
- karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
- ((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
- karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
- karg.fw_version[2] = '.';
- karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
- ((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
- karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
- karg.fw_version[5] = '.';
- karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
- ((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
- karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
- karg.fw_version[8] = '.';
- karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
- ((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
- karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
- karg.fw_version[11] = '\0';
+ /* Reformat the fw_version into a string */
+ snprintf(karg.fw_version, sizeof(karg.fw_version),
+ "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
+ ioc->facts.FWVersion.Struct.Major,
+ ioc->facts.FWVersion.Struct.Minor,
+ ioc->facts.FWVersion.Struct.Unit,
+ ioc->facts.FWVersion.Struct.Dev);
/* Issue a config request to get the device serial number
*/
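
[note] The snprintf() replacement above is behaviour-preserving because a precision on an integer conversion means "at least this many digits, zero-padded": "%.2hhu" prints a u8 as 5 -> "05" and 12 -> "12", exactly what the old character-by-character code produced, and snprintf() bounds and NUL-terminates the fixed-size buffer. A tiny userspace check of the format string, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		char buf[12];
		unsigned char major = 1, minor = 5, unit = 12, dev = 0;

		/* ".2" is precision, not field width: integers are zero-padded */
		snprintf(buf, sizeof(buf), "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
			 major, minor, unit, dev);
		puts(buf);	/* prints "01.05.12.00" */
		return 0;
	}
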
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 345f6035599e..439ee9c5f535 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1165,7 +1165,6 @@ mptsas_schedule_target_reset(void *iocp)
* issue target reset to next device in the queue
*/
- head = &hd->target_reset_list;
if (list_empty(head))
return;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1d20a800e967..b860eb5aa194 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -222,6 +222,16 @@ config MFD_CROS_EC_SPI
response time cannot be guaranteed, we support ignoring
'pre-amble' bytes before the response actually starts.
+config MFD_CROS_EC_CHARDEV
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ select CROS_EC_CTL
+ ---help---
+ This driver adds support for talking to the ChromeOS EC from userspace.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
+
config MFD_ASIC3
bool "Compaq ASIC3"
depends on GPIOLIB && ARM
@@ -877,7 +887,7 @@ config UCB1400_CORE
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
- depends on (ARM || HEXAGON)
+ depends on (ARM || HEXAGON || COMPILE_TEST)
select IRQ_DOMAIN
select MFD_CORE
select REGMAP
@@ -929,17 +939,6 @@ config MFD_RDC321X
southbridge which provides access to GPIOs and Watchdog using the
southbridge PCI device configuration space.
-config MFD_RTSX_PCI
- tristate "Realtek PCI-E card reader"
- depends on PCI
- select MFD_CORE
- help
- This supports for Realtek PCI-Express card reader including rts5209,
- rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, etc.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick, Memory Stick Pro, Secure Digital and
- MultiMediaCard.
-
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
depends on I2C
@@ -953,16 +952,6 @@ config MFD_RT5033
sub-devices like charger, fuel gauge, flash LED, current source,
LDO and Buck.
-config MFD_RTSX_USB
- tristate "Realtek USB card reader"
- depends on USB
- select MFD_CORE
- help
- Select this option to get support for Realtek USB 2.0 card readers
- including RTS5129, RTS5139, RTS5179 and RTS5170.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick Pro, Secure Digital and MultiMediaCard.
-
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -1859,5 +1848,13 @@ config MFD_VEXPRESS_SYSREG
System Registers are the platform configuration block
on the ARM Ltd. Versatile Express board.
+config RAVE_SP_CORE
+ tristate "RAVE SP MCU core driver"
+ depends on SERIAL_DEV_BUS
+ select CRC_CCITT
+ help
+ Select this to get support for the Supervisory Processor
+ device found on several devices in the RAVE line of hardware.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d9474ade32e6..d9d2cf0d32ef 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,12 +17,9 @@ cros_ec_core-$(CONFIG_ACPI) += cros_ec_acpi_gpe.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec_core.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
+obj-$(CONFIG_MFD_CROS_EC_CHARDEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
-obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
-obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
-
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
@@ -230,3 +227,5 @@ obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
+obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
+
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index c1c815241e02..1afa27de7191 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1258,6 +1258,19 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
},
};
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
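
[note] The DEFINE_SHOW_ATTRIBUTE() macro added above generates the single_open() boilerplate from a *_show() routine — hence the renames from *_print to *_show throughout this file, since the macro token-pastes the name to build both the _open function and the _fops table (a pattern later made generic in seq_file.h). A sketch of its use for a hypothetical attribute:

	static int ab8500_example_show(struct seq_file *s, void *p)
	{
		seq_puts(s, "example\n");
		return 0;
	}

	DEFINE_SHOW_ATTRIBUTE(ab8500_example);
	/* expands into ab8500_example_open() and ab8500_example_fops, ready for
	 * debugfs_create_file("example", 0444, dir, dev, &ab8500_example_fops) */
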
@@ -1318,7 +1331,7 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
return 0;
}
-static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+static int ab8500_bank_registers_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
u32 bank = debug_bank;
@@ -1330,18 +1343,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
return ab8500_registers_print(dev, bank, s);
}
-static int ab8500_registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_bank_registers, inode->i_private);
-}
-
-static const struct file_operations ab8500_registers_fops = {
- .open = ab8500_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_bank_registers);
static int ab8500_print_all_banks(struct seq_file *s, void *p)
{
@@ -1528,7 +1530,7 @@ void ab8500_debug_register_interrupt(int line)
num_interrupts[line]++;
}
-static int ab8500_interrupts_print(struct seq_file *s, void *p)
+static int ab8500_interrupts_show(struct seq_file *s, void *p)
{
int line;
@@ -1557,10 +1559,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_interrupts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_interrupts_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_interrupts);
/*
* - HWREG DB8500 formated routines
@@ -1603,7 +1602,7 @@ static int ab8500_hwreg_open(struct inode *inode, struct file *file)
#define AB8500_LAST_SIM_REG 0x8B
#define AB8505_LAST_SIM_REG 0x8C
-static int ab8500_print_modem_registers(struct seq_file *s, void *p)
+static int ab8500_modem_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
struct ab8500 *ab8500;
@@ -1620,18 +1619,15 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
err = abx500_get_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, &orig_value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
/* Config 1 will allow APE side to read SIM registers */
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG,
AB8500_SUPPLY_CONTROL_CONFIG_1);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
seq_printf(s, " bank 0x%02X:\n", bank);
@@ -1641,36 +1637,30 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
for (reg = AB8500_FIRST_SIM_REG; reg <= last_sim_reg; reg++) {
err = abx500_get_register_interruptible(dev,
bank, reg, &value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n", bank, reg, value);
}
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
+
return 0;
-}
-static int ab8500_modem_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_modem_registers,
- inode->i_private);
+report_read_failure:
+ dev_err(dev, "ab->read fail %d\n", err);
+ return err;
+
+report_write_failure:
+ dev_err(dev, "ab->write fail %d\n", err);
+ return err;
}
-static const struct file_operations ab8500_modem_fops = {
- .open = ab8500_modem_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -1687,21 +1677,9 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bat_ctrl_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
- .open = ab8500_gpadc_bat_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
{
int btemp_ball_raw;
int btemp_ball_convert;
@@ -1718,22 +1696,9 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_btemp_ball_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
- .open = ab8500_gpadc_btemp_ball_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
{
int main_charger_v_raw;
int main_charger_v_convert;
@@ -1750,22 +1715,9 @@ static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
- .open = ab8500_gpadc_main_charger_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
{
int acc_detect1_raw;
int acc_detect1_convert;
@@ -1782,22 +1734,9 @@ static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect1_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
- .open = ab8500_gpadc_acc_detect1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
{
int acc_detect2_raw;
int acc_detect2_convert;
@@ -1814,22 +1753,9 @@ static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect2_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
- .open = ab8500_gpadc_acc_detect2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
{
int aux1_raw;
int aux1_convert;
@@ -1846,20 +1772,9 @@ static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-static const struct file_operations ab8500_gpadc_aux1_fops = {
- .open = ab8500_gpadc_aux1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
{
int aux2_raw;
int aux2_convert;
@@ -1876,20 +1791,9 @@ static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_aux2_fops = {
- .open = ab8500_gpadc_aux2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
{
int main_bat_v_raw;
int main_bat_v_convert;
@@ -1906,22 +1810,9 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_bat_v_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
- .open = ab8500_gpadc_main_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
{
int vbus_v_raw;
int vbus_v_convert;
@@ -1938,20 +1829,9 @@ static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-static const struct file_operations ab8500_gpadc_vbus_v_fops = {
- .open = ab8500_gpadc_vbus_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
{
int main_charger_c_raw;
int main_charger_c_convert;
@@ -1968,22 +1848,9 @@ static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
- .open = ab8500_gpadc_main_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
{
int usb_charger_c_raw;
int usb_charger_c_convert;
@@ -2000,22 +1867,9 @@ static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
- .open = ab8500_gpadc_usb_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
{
int bk_bat_v_raw;
int bk_bat_v_convert;
@@ -2032,21 +1886,9 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bk_bat_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
- .open = ab8500_gpadc_bk_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
{
int die_temp_raw;
int die_temp_convert;
@@ -2063,21 +1905,9 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_die_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-static const struct file_operations ab8500_gpadc_die_temp_fops = {
- .open = ab8500_gpadc_die_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
{
int usb_id_raw;
int usb_id_convert;
@@ -2094,20 +1924,9 @@ static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_id_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_id_fops = {
- .open = ab8500_gpadc_usb_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
{
int xtal_temp_raw;
int xtal_temp_convert;
@@ -2124,21 +1943,9 @@ static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_xtal_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-static const struct file_operations ab8540_gpadc_xtal_temp_fops = {
- .open = ab8540_gpadc_xtal_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2156,22 +1963,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-static const struct file_operations ab8540_gpadc_vbat_true_meas_fops = {
- .open = ab8540_gpadc_vbat_true_meas_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -2197,22 +1991,9 @@ static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_ctrl_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_ctrl_and_ibat_fops = {
- .open = ab8540_gpadc_bat_ctrl_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_meas_raw;
int vbat_meas_convert;
@@ -2237,23 +2018,9 @@ static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_meas_and_ibat_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
- void *p)
+static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2279,23 +2046,9 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations
-ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
{
int bat_temp_raw;
int bat_temp_convert;
@@ -2320,22 +2073,9 @@ static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_temp_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_temp_and_ibat_fops = {
- .open = ab8540_gpadc_bat_temp_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
{
struct ab8500_gpadc *gpadc;
u16 vmain_l, vmain_h, btemp_l, btemp_h;
@@ -2359,18 +2099,7 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_otp_cal_print, inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_otp_calib_fops = {
- .open = ab8540_gpadc_otp_cal_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
{
@@ -2903,14 +2632,6 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
-static const struct file_operations ab8500_interrupts_fops = {
- .open = ab8500_interrupts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
static const struct file_operations ab8500_subscribe_fops = {
.open = ab8500_subscribe_unsubscribe_open,
.write = ab8500_subscribe_write,
@@ -2997,7 +2718,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
goto err;
file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
- &plf->dev, &ab8500_registers_fops);
+ &plf->dev, &ab8500_bank_registers_fops);
if (!file)
goto err;
diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c
index 064bde9cff5a..f684a93a3340 100644
--- a/drivers/mfd/atmel-flexcom.c
+++ b/drivers/mfd/atmel-flexcom.c
@@ -39,34 +39,43 @@
#define FLEX_MR_OPMODE(opmode) (((opmode) << FLEX_MR_OPMODE_OFFSET) & \
FLEX_MR_OPMODE_MASK)
+struct atmel_flexcom {
+ void __iomem *base;
+ u32 opmode;
+ struct clk *clk;
+};
static int atmel_flexcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct clk *clk;
struct resource *res;
- void __iomem *base;
- u32 opmode;
+ struct atmel_flexcom *ddata;
int err;
- err = of_property_read_u32(np, "atmel,flexcom-mode", &opmode);
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ err = of_property_read_u32(np, "atmel,flexcom-mode", &ddata->opmode);
if (err)
return err;
- if (opmode < ATMEL_FLEXCOM_MODE_USART ||
- opmode > ATMEL_FLEXCOM_MODE_TWI)
+ if (ddata->opmode < ATMEL_FLEXCOM_MODE_USART ||
+ ddata->opmode > ATMEL_FLEXCOM_MODE_TWI)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk))
+ return PTR_ERR(ddata->clk);
- err = clk_prepare_enable(clk);
+ err = clk_prepare_enable(ddata->clk);
if (err)
return err;
@@ -76,9 +85,9 @@ static int atmel_flexcom_probe(struct platform_device *pdev)
* inaccessible and are read as zero. Also the external I/O lines of the
* Flexcom are muxed to reach the selected device.
*/
- writel(FLEX_MR_OPMODE(opmode), base + FLEX_MR);
+ writel(FLEX_MR_OPMODE(ddata->opmode), ddata->base + FLEX_MR);
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(ddata->clk);
return devm_of_platform_populate(&pdev->dev);
}
@@ -89,10 +98,34 @@ static const struct of_device_id atmel_flexcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_flexcom_resume(struct device *dev)
+{
+ struct atmel_flexcom *ddata = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = clk_prepare_enable(ddata->clk);
+ if (err)
+ return err;
+
+ val = FLEX_MR_OPMODE(ddata->opmode);
+ writel(val, ddata->base + FLEX_MR);
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL,
+ atmel_flexcom_resume);
+
static struct platform_driver atmel_flexcom_driver = {
.probe = atmel_flexcom_probe,
.driver = {
.name = "atmel_flexcom",
+ .pm = &atmel_flexcom_pm_ops,
.of_match_table = atmel_flexcom_of_match,
},
};
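For reference, a minimal sketch of what SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL, atmel_flexcom_resume) boils down to when CONFIG_PM_SLEEP is set (illustrative only; the real macro goes through SET_SYSTEM_SLEEP_PM_OPS, which wires the hibernation hooks the same way):

	static const struct dev_pm_ops atmel_flexcom_pm_ops = {
		.suspend  = NULL,			/* nothing to save on suspend */
		.resume   = atmel_flexcom_resume,	/* reprogram FLEX_MR on resume */
		.freeze   = NULL,
		.thaw     = atmel_flexcom_resume,
		.poweroff = NULL,
		.restore  = atmel_flexcom_resume,
	};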
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 2468b431bb22..e94c72c2faa2 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -129,6 +129,7 @@ static const struct regmap_range axp288_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+ regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
@@ -878,6 +879,9 @@ static struct mfd_cell axp813_cells[] = {
.resources = axp803_pek_resources,
}, {
.name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}
};
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index b0ca5a4c841e..d61024141e2b 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -40,13 +40,13 @@ static struct cros_ec_platform pd_p = {
};
static const struct mfd_cell ec_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &ec_p,
.pdata_size = sizeof(ec_p),
};
static const struct mfd_cell ec_pd_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &pd_p,
.pdata_size = sizeof(pd_p),
};
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index cf6c4f0846b8..e4fafdd96e5e 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -25,9 +25,10 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include "cros_ec_debugfs.h"
#include "cros_ec_dev.h"
+#define DRV_NAME "cros-ec-dev"
+
/* Device variables */
#define CROS_MAX_DEV 128
static int ec_major;
@@ -461,7 +462,7 @@ static int ec_device_remove(struct platform_device *pdev)
}
static const struct platform_device_id cros_ec_id[] = {
- { "cros-ec-ctl", 0 },
+ { DRV_NAME, 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, cros_ec_id);
@@ -493,7 +494,7 @@ static const struct dev_pm_ops cros_ec_dev_pm_ops = {
static struct platform_driver cros_ec_dev_driver = {
.driver = {
- .name = "cros-ec-ctl",
+ .name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
.probe = ec_device_probe,
@@ -544,6 +545,7 @@ static void __exit cros_ec_dev_exit(void)
module_init(cros_ec_dev_init);
module_exit(cros_ec_dev_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
MODULE_VERSION("1.0");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h
index 45e9453608c5..45e9453608c5 100644
--- a/drivers/platform/chrome/cros_ec_dev.h
+++ b/drivers/mfd/cros_ec_dev.h
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 59c82cdcf48d..1b52b8557034 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -72,8 +72,7 @@
* struct cros_ec_spi - information about a SPI-connected EC
*
* @spi: SPI device we are connected to
- * @last_transfer_ns: time that we last finished a transfer, or 0 if there
- * if no record
+ * @last_transfer_ns: time that we last finished a transfer.
* @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn on CS at the start of a transaction.
* @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
@@ -379,18 +378,15 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
u8 sum;
u8 rx_byte;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
@@ -509,18 +505,15 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
u8 rx_byte;
int sum;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
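Dropping the last_transfer_ns guard is safe because the field is zero-initialized: on the very first transfer the subtraction yields the full monotonic clock value, which by the time the driver probes exceeds EC_SPI_RECOVERY_TIME_NS, so the ndelay() is skipped exactly as the old `if (ec_spi->last_transfer_ns)` check arranged. A sketch of the first-call behaviour (illustrative):

	delay = ktime_get_ns() - 0;		/* time since boot, in ns */
	if (delay < EC_SPI_RECOVERY_TIME_NS)	/* false once probe has run */
		ndelay(EC_SPI_RECOVERY_TIME_NS - delay);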
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 0e0ab9bb1530..9e545eb6e8b4 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -450,6 +450,8 @@ int intel_lpss_probe(struct device *dev,
if (ret)
goto err_remove_ltr;
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
return 0;
err_remove_ltr:
@@ -478,7 +480,9 @@ EXPORT_SYMBOL_GPL(intel_lpss_remove);
static int resume_lpss_device(struct device *dev, void *data)
{
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
+
return 0;
}
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 36adf9e8153e..274306d98ac1 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -16,7 +16,6 @@
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 55d824b3a808..390b27cb2c2e 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -458,7 +458,7 @@ static int kempld_probe(struct platform_device *pdev)
return -EINVAL;
pld->io_base = devm_ioport_map(dev, ioport->start,
- ioport->end - ioport->start);
+ resource_size(ioport));
if (!pld->io_base)
return -ENOMEM;
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index cf1120abbf52..53dc1a43472c 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -1143,11 +1143,6 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->end = res->start + SPIBASE_APL_SZ - 1;
pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
info->writeable = !!(bcr & BCR_WPD);
}
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index dc5caeaaa6a1..da9612dbb222 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -15,7 +15,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 3922a93f9f92..663a2398b6b1 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -430,6 +430,7 @@ static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
+ u8 powerhold_mask;
struct device_node *np = palmas_dev->dev->of_node;
if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
@@ -437,8 +438,15 @@ static void palmas_power_off(void)
PALMAS_PRIMARY_SECONDARY_PAD2);
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+ if (of_device_is_compatible(np, "ti,tps65917"))
+ powerhold_mask =
+ TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK;
+ else
+ powerhold_mask =
+ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK;
+
ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
- PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+ powerhold_mask, 0);
if (ret)
dev_err(palmas_dev->dev,
"Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6155d123a84e..f952dff6765f 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -149,7 +149,7 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
*pdev = platform_device_alloc(name, -1);
if (!*pdev) {
- dev_err(pcf->dev, "Falied to allocate %s\n", name);
+ dev_err(pcf->dev, "Failed to allocate %s\n", name);
return;
}
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
new file mode 100644
index 000000000000..5c858e784a89
--- /dev/null
+++ b/drivers/mfd/rave-sp.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Multifunction core driver for Zodiac Inflight Innovations RAVE
+ * Supervisory Processor (SP) MCU that is connected via a dedicated
+ * UART port
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <asm/unaligned.h>
+
+/*
+ * The UART protocol uses the following entities:
+ * - message to MCU => ACK response
+ * - event from MCU => event ACK
+ *
+ * Frame structure:
+ * <STX> <DATA> <CHECKSUM> <ETX>
+ * Where:
+ * - STX - start of transmission character
+ * - ETX - end of transmission character
+ * - DATA - payload
+ * - CHECKSUM - checksum calculated on <DATA>
+ *
+ * If <DATA> or <CHECKSUM> contain one of the control characters, then it
+ * is escaped using the <DLE> control code. The added <DLE> does not
+ * participate in checksum calculation.
+ */
+#define RAVE_SP_STX 0x02
+#define RAVE_SP_ETX 0x03
+#define RAVE_SP_DLE 0x10
+
+#define RAVE_SP_MAX_DATA_SIZE 64
+#define RAVE_SP_CHECKSUM_SIZE 2 /* Worst case scenario on RDU2 */
+/*
+ * We don't store STX, ETX and unescaped bytes, so Rx is only
+ * DATA + CSUM
+ */
+#define RAVE_SP_RX_BUFFER_SIZE \
+ (RAVE_SP_MAX_DATA_SIZE + RAVE_SP_CHECKSUM_SIZE)
+
+#define RAVE_SP_STX_ETX_SIZE 2
+/*
+ * For Tx we have to have space for everything: STX, ETX and the
+ * potentially DLE-stuffed DATA + CSUM.
+ */
+#define RAVE_SP_TX_BUFFER_SIZE \
+ (RAVE_SP_STX_ETX_SIZE + 2 * RAVE_SP_RX_BUFFER_SIZE)
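+/*
+ * Worked example of the sizing above: RAVE_SP_RX_BUFFER_SIZE is
+ * 64 + 2 = 66 bytes, so RAVE_SP_TX_BUFFER_SIZE is 2 + 2 * 66 = 134
+ * bytes, which is enough even if every DATA and CSUM byte needs DLE
+ * stuffing.
+ */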
+
+#define RAVE_SP_BOOT_SOURCE_GET 0
+#define RAVE_SP_BOOT_SOURCE_SET 1
+
+#define RAVE_SP_RDU2_BOARD_TYPE_RMB 0
+#define RAVE_SP_RDU2_BOARD_TYPE_DEB 1
+
+#define RAVE_SP_BOOT_SOURCE_SD 0
+#define RAVE_SP_BOOT_SOURCE_EMMC 1
+#define RAVE_SP_BOOT_SOURCE_NOR 2
+
+/**
+ * enum rave_sp_deframer_state - Possible state for de-framer
+ *
+ * @RAVE_SP_EXPECT_SOF: Scanning input for start-of-frame marker
+ * @RAVE_SP_EXPECT_DATA: Got start of frame marker, collecting frame
+ * @RAVE_SP_EXPECT_ESCAPED_DATA: Got escape character, collecting escaped byte
+ */
+enum rave_sp_deframer_state {
+ RAVE_SP_EXPECT_SOF,
+ RAVE_SP_EXPECT_DATA,
+ RAVE_SP_EXPECT_ESCAPED_DATA,
+};
+
+/**
+ * struct rave_sp_deframer - Device protocol deframer
+ *
+ * @state: Current state of the deframer
+ * @data: Buffer used to collect deframed data
+ * @length: Number of bytes de-framed so far
+ */
+struct rave_sp_deframer {
+ enum rave_sp_deframer_state state;
+ unsigned char data[RAVE_SP_RX_BUFFER_SIZE];
+ size_t length;
+};
+
+/**
+ * struct rave_sp_reply - Reply as per RAVE device protocol
+ *
+ * @length: Expected reply length
+ * @data: Buffer to store reply payload in
+ * @code: Expected reply code
+ * @ackid: Expected reply ACK ID
+ * @received: Completed upon successful reply reception
+ */
+struct rave_sp_reply {
+ size_t length;
+ void *data;
+ u8 code;
+ u8 ackid;
+ struct completion received;
+};
+
+/**
+ * struct rave_sp_checksum - Variant specific checksum implementation details
+ *
+ * @length: Calculated checksum length
+ * @subroutine: Utilized checksum algorithm implementation
+ */
+struct rave_sp_checksum {
+ size_t length;
+ void (*subroutine)(const u8 *, size_t, u8 *);
+};
+
+/**
+ * struct rave_sp_variant_cmds - Variant specific command routines
+ *
+ * @translate: Generic to variant specific command mapping routine
+ *
+ */
+struct rave_sp_variant_cmds {
+ int (*translate)(enum rave_sp_command);
+};
+
+/**
+ * struct rave_sp_variant - RAVE supervisory processor core variant
+ *
+ * @checksum: Variant specific checksum implementation
+ * @cmd: Variant specific command pointer table
+ *
+ */
+struct rave_sp_variant {
+ const struct rave_sp_checksum *checksum;
+ struct rave_sp_variant_cmds cmd;
+};
+
+/**
+ * struct rave_sp - RAVE supervisory processor core
+ *
+ * @serdev: Pointer to underlying serdev
+ * @deframer: Stored state of the protocol deframer
+ * @ackid: ACK ID used in last reply sent to the device
+ * @bus_lock: Lock to serialize access to the device
+ * @reply_lock: Lock protecting @reply
+ * @reply: Pointer to memory to store reply payload
+ *
+ * @variant: Device variant specific information
+ * @event_notifier_list: Input event notification chain
+ *
+ */
+struct rave_sp {
+ struct serdev_device *serdev;
+ struct rave_sp_deframer deframer;
+ atomic_t ackid;
+ struct mutex bus_lock;
+ struct mutex reply_lock;
+ struct rave_sp_reply *reply;
+
+ const struct rave_sp_variant *variant;
+ struct blocking_notifier_head event_notifier_list;
+};
+
+static bool rave_sp_id_is_event(u8 code)
+{
+ return (code & 0xF0) == RAVE_SP_EVNT_BASE;
+}
+
+static void rave_sp_unregister_event_notifier(struct device *dev, void *res)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block *nb = *(struct notifier_block **)res;
+ struct blocking_notifier_head *bnh = &sp->event_notifier_list;
+
+ WARN_ON(blocking_notifier_chain_unregister(bnh, nb));
+}
+
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block **rcnb;
+ int ret;
+
+ rcnb = devres_alloc(rave_sp_unregister_event_notifier,
+ sizeof(*rcnb), GFP_KERNEL);
+ if (!rcnb)
+ return -ENOMEM;
+
+ ret = blocking_notifier_chain_register(&sp->event_notifier_list, nb);
+ if (!ret) {
+ *rcnb = nb;
+ devres_add(dev, rcnb);
+ } else {
+ devres_free(rcnb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_rave_sp_register_event_notifier);
+
+static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
+{
+ *crc = *buf++;
+ size--;
+
+ while (size--)
+ *crc += *buf++;
+
+ *crc = 1 + ~(*crc);
+}
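+/*
+ * Worked example: for buf = {0x01, 0x02} the running sum is 0x03 and
+ * the resulting checksum is 1 + ~0x03 = 0xFD. Adding it back in gives
+ * 0x03 + 0xFD = 0x100, i.e. 0 modulo 256, which is what the receiving
+ * side can verify.
+ */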
+
+static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
+{
+ const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+
+ /*
+ * While the rest of the wire protocol is little-endian,
+ * CCITT-16 CRC in RDU2 device is sent out in big-endian order.
+ */
+ put_unaligned_be16(calculated, crc);
+}
+
+static void *stuff(unsigned char *dest, const unsigned char *src, size_t n)
+{
+ while (n--) {
+ const unsigned char byte = *src++;
+
+ switch (byte) {
+ case RAVE_SP_STX:
+ case RAVE_SP_ETX:
+ case RAVE_SP_DLE:
+ *dest++ = RAVE_SP_DLE;
+ /* FALLTHROUGH */
+ default:
+ *dest++ = byte;
+ }
+ }
+
+ return dest;
+}
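+/*
+ * Example: stuffing the two bytes {0x02, 0x41} produces
+ * {0x10, 0x02, 0x41}: the STX-valued data byte gets a DLE prefix,
+ * while the ordinary byte is copied through unchanged.
+ */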
+
+static int rave_sp_write(struct rave_sp *sp, const u8 *data, u8 data_size)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ unsigned char frame[RAVE_SP_TX_BUFFER_SIZE];
+ unsigned char crc[RAVE_SP_CHECKSUM_SIZE];
+ unsigned char *dest = frame;
+ size_t length;
+
+ if (WARN_ON(checksum_length > sizeof(crc)))
+ return -ENOMEM;
+
+ if (WARN_ON(data_size > sizeof(frame)))
+ return -ENOMEM;
+
+ sp->variant->checksum->subroutine(data, data_size, crc);
+
+ *dest++ = RAVE_SP_STX;
+ dest = stuff(dest, data, data_size);
+ dest = stuff(dest, crc, checksum_length);
+ *dest++ = RAVE_SP_ETX;
+
+ length = dest - frame;
+
+ print_hex_dump(KERN_DEBUG, "rave-sp tx: ", DUMP_PREFIX_NONE,
+ 16, 1, frame, length, false);
+
+ return serdev_device_write(sp->serdev, frame, length, HZ);
+}
+
+static u8 rave_sp_reply_code(u8 command)
+{
+ /*
+ * There isn't a single rule that describes command code ->
+ * ACK code transformation, but, going through various
+ * versions of ICDs, there appear to be three distinct groups
+ * that can be described by a simple transformation.
+ */
+ switch (command) {
+ case 0xA0 ... 0xBE:
+ /*
+ * Commands implemented by firmware found in RDU1 and
+ * older devices all seem to obey the following rule
+ */
+ return command + 0x20;
+ case 0xE0 ... 0xEF:
+ /*
+ * Events emitted by all versions of the firmware use
+ * the least significant bit to get an ACK code
+ */
+ return command | 0x01;
+ default:
+ /*
+ * Commands implemented by firmware found in RDU2 are
+ * similar to "old" commands, but they use slightly
+ * different offset
+ */
+ return command + 0x40;
+ }
+}
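+/*
+ * Examples of the mapping above: command 0xA1 is acked with 0xC1,
+ * event 0xE0 is acked with 0xE1, and command 0x11 (an RDU2-era code)
+ * is acked with 0x51.
+ */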
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size)
+{
+ struct rave_sp_reply reply = {
+ .data = reply_data,
+ .length = reply_data_size,
+ .received = COMPLETION_INITIALIZER_ONSTACK(reply.received),
+ };
+ unsigned char *data = __data;
+ int command, ret = 0;
+ u8 ackid;
+
+ command = sp->variant->cmd.translate(data[0]);
+ if (command < 0)
+ return command;
+
+ ackid = atomic_inc_return(&sp->ackid);
+ reply.ackid = ackid;
+ reply.code = rave_sp_reply_code((u8)command);
+
+ mutex_lock(&sp->bus_lock);
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = &reply;
+ mutex_unlock(&sp->reply_lock);
+
+ data[0] = command;
+ data[1] = ackid;
+
+ rave_sp_write(sp, data, data_size);
+
+ if (!wait_for_completion_timeout(&reply.received, HZ)) {
+ dev_err(&sp->serdev->dev, "Command timeout\n");
+ ret = -ETIMEDOUT;
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = NULL;
+ mutex_unlock(&sp->reply_lock);
+ }
+
+ mutex_unlock(&sp->bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rave_sp_exec);
+
+static void rave_sp_receive_event(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ u8 cmd[] = {
+ [0] = rave_sp_reply_code(data[0]),
+ [1] = data[1],
+ };
+
+ rave_sp_write(sp, cmd, sizeof(cmd));
+
+ blocking_notifier_call_chain(&sp->event_notifier_list,
+ rave_sp_action_pack(data[0], data[2]),
+ NULL);
+}
+
+static void rave_sp_receive_reply(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ struct device *dev = &sp->serdev->dev;
+ struct rave_sp_reply *reply;
+ const size_t payload_length = length - 2;
+
+ mutex_lock(&sp->reply_lock);
+ reply = sp->reply;
+
+ if (reply) {
+ if (reply->code == data[0] && reply->ackid == data[1] &&
+ payload_length >= reply->length) {
+ /*
+ * We are relying on memcpy(dst, src, 0) to be a no-op
+ * when handling commands that have a no-payload reply
+ */
+ memcpy(reply->data, &data[2], reply->length);
+ complete(&reply->received);
+ sp->reply = NULL;
+ } else {
+ dev_err(dev, "Ignoring incorrect reply\n");
+ dev_dbg(dev, "Code: expected = 0x%08x received = 0x%08x\n",
+ reply->code, data[0]);
+ dev_dbg(dev, "ACK ID: expected = 0x%08x received = 0x%08x\n",
+ reply->ackid, data[1]);
+ dev_dbg(dev, "Length: expected = %zu received = %zu\n",
+ reply->length, payload_length);
+ }
+ }
+
+ mutex_unlock(&sp->reply_lock);
+}
+
+static void rave_sp_receive_frame(struct rave_sp *sp,
+ const unsigned char *data,
+ size_t length)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ const size_t payload_length = length - checksum_length;
+ const u8 *crc_reported = &data[payload_length];
+ struct device *dev = &sp->serdev->dev;
+ u8 crc_calculated[checksum_length];
+
+ print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
+ 16, 1, data, length, false);
+
+ if (unlikely(length <= checksum_length)) {
+ dev_warn(dev, "Dropping short frame\n");
+ return;
+ }
+
+ sp->variant->checksum->subroutine(data, payload_length,
+ crc_calculated);
+
+ if (memcmp(crc_calculated, crc_reported, checksum_length)) {
+ dev_warn(dev, "Dropping bad frame\n");
+ return;
+ }
+
+ if (rave_sp_id_is_event(data[0]))
+ rave_sp_receive_event(sp, data, length);
+ else
+ rave_sp_receive_reply(sp, data, length);
+}
+
+static int rave_sp_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp = dev_get_drvdata(dev);
+ struct rave_sp_deframer *deframer = &sp->deframer;
+ const unsigned char *src = buf;
+ const unsigned char *end = buf + size;
+
+ while (src < end) {
+ const unsigned char byte = *src++;
+
+ switch (deframer->state) {
+ case RAVE_SP_EXPECT_SOF:
+ if (byte == RAVE_SP_STX)
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+
+ case RAVE_SP_EXPECT_DATA:
+ /*
+ * Treat special byte values first
+ */
+ switch (byte) {
+ case RAVE_SP_ETX:
+ rave_sp_receive_frame(sp,
+ deframer->data,
+ deframer->length);
+ /*
+ * Once we've extracted a complete frame
+ * out of the stream, we call it done
+ * and proceed to bail out while
+ * resetting the framer to its initial
+ * state, regardless of whether we've
+ * consumed all of the stream or not.
+ */
+ goto reset_framer;
+ case RAVE_SP_STX:
+ dev_warn(dev, "Bad frame: STX before ETX\n");
+ /*
+ * If we encounter a second "start of
+ * the frame" marker before seeing the
+ * corresponding "end of frame", we
+ * reset the framer and ignore both:
+ * the frame started by the first SOF
+ * and the frame started by the current SOF.
+ *
+ * NOTE: The above means that only the
+ * frame started by the third SOF, sent
+ * after this one, will have a chance
+ * to get through.
+ */
+ goto reset_framer;
+ case RAVE_SP_DLE:
+ deframer->state = RAVE_SP_EXPECT_ESCAPED_DATA;
+ /*
+ * If we encounter an escape sequence we
+ * need to skip it and collect the
+ * byte that follows. We do it by
+ * forcing the next iteration of the
+ * encompassing while loop.
+ */
+ continue;
+ }
+ /*
+ * For the rest of the bytes, which are not
+ * special snowflakes, we do the same thing
+ * that we do to escaped data - collect them
+ * in the deframer buffer
+ */
+
+ /* FALLTHROUGH */
+
+ case RAVE_SP_EXPECT_ESCAPED_DATA:
+ deframer->data[deframer->length++] = byte;
+
+ if (deframer->length == sizeof(deframer->data)) {
+ dev_warn(dev, "Bad frame: Too long\n");
+ /*
+ * If the amount of data we've
+ * accumulated for current frame so
+ * far starts to exceed the capacity
+ * of deframer's buffer, there's
+ * nothing else we can do but to
+ * discard that data and start
+ * assemblying a new frame again
+ */
+ goto reset_framer;
+ }
+
+ /*
+ * We've extracted out special byte, now we
+ * can go back to regular data collecting
+ */
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+ }
+ }
+
+ /*
+ * The only way to get out of the above loop and end up here
+ * is through consuming all of the supplied data, so here we
+ * report that we processed it all.
+ */
+ return size;
+
+reset_framer:
+ /*
+ * NOTE: A number of codepaths that will drop us here will do
+ * so before consuming all 'size' bytes of the data passed by
+ * serdev layer. We rely on the fact that serdev layer will
+ * re-execute this handler with the remainder of the Rx bytes
+ * once we report actual number of bytes that we processed.
+ */
+ deframer->state = RAVE_SP_EXPECT_SOF;
+ deframer->length = 0;
+
+ return src - buf;
+}
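+/*
+ * Worked example (legacy 8-bit checksum variant): the wire sequence
+ * 0x02 0x11 0x01 0xEE 0x03 de-frames to {0x11, 0x01, 0xEE}. The
+ * checksum over {0x11, 0x01} is 1 + ~(0x11 + 0x01) = 0xEE, so the
+ * frame is accepted and dispatched as a reply to command 0x11 with
+ * ACK ID 0x01.
+ */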
+
+static int rave_sp_rdu1_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_STATUS &&
+ command <= RAVE_SP_CMD_CONTROL_EVENTS)
+ return command;
+
+ return -EINVAL;
+}
+
+static int rave_sp_rdu2_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_GET_FIRMWARE_VERSION &&
+ command <= RAVE_SP_CMD_GET_GPIO_STATE)
+ return command;
+
+ if (command == RAVE_SP_CMD_REQ_COPPER_REV) {
+ /*
+ * As per RDU2 ICD 3.4.47 CMD_GET_COPPER_REV code is
+ * different from that for RDU1 and it is set to 0x28.
+ */
+ return 0x28;
+ }
+
+ return rave_sp_rdu1_cmd_translate(command);
+}
+
+static int rave_sp_default_cmd_translate(enum rave_sp_command command)
+{
+ /*
+ * All of the following command codes were taken from "Table :
+ * Communications Protocol Message Types" in section 3.3
+ * "MESSAGE TYPES" of Rave PIC24 ICD.
+ */
+ switch (command) {
+ case RAVE_SP_CMD_GET_FIRMWARE_VERSION:
+ return 0x11;
+ case RAVE_SP_CMD_GET_BOOTLOADER_VERSION:
+ return 0x12;
+ case RAVE_SP_CMD_BOOT_SOURCE:
+ return 0x14;
+ case RAVE_SP_CMD_SW_WDT:
+ return 0x1C;
+ case RAVE_SP_CMD_RESET:
+ return 0x1E;
+ case RAVE_SP_CMD_RESET_REASON:
+ return 0x1F;
+ default:
+ return -EINVAL;
+ }
+}
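+/*
+ * Example: on "legacy" units RAVE_SP_CMD_BOOT_SOURCE goes out on the
+ * wire as 0x14, while the RDU1/RDU2 translate routines pass the enum
+ * value through unchanged (RDU2 additionally remaps
+ * RAVE_SP_CMD_REQ_COPPER_REV to 0x28).
+ */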
+
+static const struct rave_sp_checksum rave_sp_checksum_8b2c = {
+ .length = 1,
+ .subroutine = csum_8b2c,
+};
+
+static const struct rave_sp_checksum rave_sp_checksum_ccitt = {
+ .length = 2,
+ .subroutine = csum_ccitt,
+};
+
+static const struct rave_sp_variant rave_sp_legacy = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_default_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu1 = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_rdu1_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu2 = {
+ .checksum = &rave_sp_checksum_ccitt,
+ .cmd = {
+ .translate = rave_sp_rdu2_cmd_translate,
+ },
+};
+
+static const struct of_device_id rave_sp_dt_ids[] = {
+ { .compatible = "zii,rave-sp-niu", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-mezz", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-esb", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-rdu1", .data = &rave_sp_rdu1 },
+ { .compatible = "zii,rave-sp-rdu2", .data = &rave_sp_rdu2 },
+ { /* sentinel */ }
+};
+
+static const struct serdev_device_ops rave_sp_serdev_device_ops = {
+ .receive_buf = rave_sp_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int rave_sp_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp;
+ u32 baud;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "current-speed", &baud)) {
+ dev_err(dev,
+ "'current-speed' is not specified in device node\n");
+ return -EINVAL;
+ }
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+ sp->serdev = serdev;
+ dev_set_drvdata(dev, sp);
+
+ sp->variant = of_device_get_match_data(dev);
+ if (!sp->variant)
+ return -ENODEV;
+
+ mutex_init(&sp->bus_lock);
+ mutex_init(&sp->reply_lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&sp->event_notifier_list);
+
+ serdev_device_set_client_ops(serdev, &rave_sp_serdev_device_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, baud);
+
+ return devm_of_platform_populate(dev);
+}
+
+MODULE_DEVICE_TABLE(of, rave_sp_dt_ids);
+
+static struct serdev_device_driver rave_sp_drv = {
+ .probe = rave_sp_probe,
+ .driver = {
+ .name = "rave-sp",
+ .of_match_table = rave_sp_dt_ids,
+ },
+};
+module_serdev_device_driver(rave_sp_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP core driver");
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index 075330a25f61..a00f99f36559 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-lptimer.h>
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index a6675a449409..1d347e5dfa79 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-timers.h>
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index b93fe4c4957a..7eaa40bc703f 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -13,6 +13,7 @@
*/
#include <linux/err.h>
+#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -87,6 +88,24 @@ static struct syscon *of_syscon_register(struct device_node *np)
if (ret)
reg_io_width = 4;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+ syscon_config.use_hwlock = true;
+ syscon_config.hwlock_id = ret;
+ syscon_config.hwlock_mode = HWLOCK_IRQSTATE;
+ } else if (ret < 0) {
+ switch (ret) {
+ case -ENOENT:
+ /* Ignore missing hwlock, it's optional. */
+ break;
+ default:
+ pr_err("Failed to retrieve valid hwlock: %d\n", ret);
+ /* fall-through */
+ case -EPROBE_DEFER:
+ goto err_regmap;
+ }
+ }
+
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
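A note on the hwlock guard above: when CONFIG_HWSPINLOCK is disabled, the of_hwspin_lock_get_id() stub returns 0, which is indistinguishable from a valid hwlock id of 0, so id 0 is only trusted when real hwspinlock support is built in. A sketch of the intent (illustrative):

	ret = of_hwspin_lock_get_id(np, 0);
	if (ret > 0 ||					/* ids above 0 are always real */
	    (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0))	/* id 0 needs real support */
		/* configure the regmap to take the hwlock around accesses */;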
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 0f3fab47fe48..3cd958a31f36 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -124,7 +124,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
struct ti_tscadc_dev *tscadc;
struct resource *res;
struct clk *clk;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node;
struct mfd_cell *cell;
struct property *prop;
const __be32 *cur;
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
index 83af78c1b0eb..ebf54cc28f7a 100644
--- a/drivers/mfd/tmio_core.c
+++ b/drivers/mfd/tmio_core.c
@@ -9,6 +9,26 @@
#include <linux/export.h>
#include <linux/mfd/tmio.h>
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
{
/* Enable the MMC/SD Control registers */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f1a5c2357b14..7c0fa24f9067 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,6 +496,10 @@ config PCI_ENDPOINT_TEST
Enable this configuration option to enable the host side test driver
for PCI Endpoint.
+config MISC_RTSX
+ tristate
+ default MISC_RTSX_PCI || MISC_RTSX_USB
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -508,4 +512,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/cardreader/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5ca5f64df478..8d8cc096063b 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
+obj-$(CONFIG_MISC_RTSX) += cardreader/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig
new file mode 100644
index 000000000000..69e815e32a8c
--- /dev/null
+++ b/drivers/misc/cardreader/Kconfig
@@ -0,0 +1,20 @@
+config MISC_RTSX_PCI
+ tristate "Realtek PCI-E card reader"
+ depends on PCI
+ select MFD_CORE
+ help
+ This adds support for Realtek PCI-Express card readers including rts5209,
+ rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, rts5260.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick, Memory Stick Pro, Secure Digital and
+ MultiMediaCard.
+
+config MISC_RTSX_USB
+ tristate "Realtek USB card reader"
+ depends on USB
+ select MFD_CORE
+ help
+ Select this option to get support for Realtek USB 2.0 card readers
+ including RTS5129, RTS5139, RTS5179 and RTS5170.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick Pro, Secure Digital and MultiMediaCard.
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
new file mode 100644
index 000000000000..9fabfcc6fa7a
--- /dev/null
+++ b/drivers/misc/cardreader/Makefile
@@ -0,0 +1,4 @@
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+
+obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
+obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/mfd/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index b3ae6592014a..434fd070d3e3 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5209.c b/drivers/misc/cardreader/rts5209.c
index b95beecf767f..ce68c48d8ec9 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5227.c b/drivers/misc/cardreader/rts5227.c
index ff296a4bf3d2..024dcba8d6c8 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9ed9dc84eac8..9119261337cc 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 7fcf37ba922c..dbe013abdb83 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
@@ -738,4 +738,3 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
}
-
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
new file mode 100644
index 000000000000..07cb93abf685
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.c
@@ -0,0 +1,748 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Steven FENG <steven_feng@realsil.com.cn>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5260.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[6][3] = {
+ {0x94, 0x94, 0x94},
+ {0x11, 0x11, 0x18},
+ {0x55, 0x55, 0x5C},
+ {0x94, 0x94, 0x94},
+ {0x94, 0x94, 0x94},
+ {0xFF, 0xFF, 0xFF},
+ };
+ u8 driving_1v8[6][3] = {
+ {0x9A, 0x89, 0x89},
+ {0xC4, 0xC4, 0xC4},
+ {0x3C, 0x3C, 0x3C},
+ {0x9B, 0x99, 0x99},
+ {0x9A, 0x89, 0x89},
+ {0xFE, 0xFE, 0xFE},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
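+/*
+ * Example: with voltage == OUTPUT_3V3 and pcr->sd30_drive_sel_3v3 == 1,
+ * the three writes above program SD30_CLK/CMD/DAT_DRIVE_SEL with 0x11,
+ * 0x11 and 0x18 (row 1 of driving_3v3; the columns are CLK, CMD, DAT).
+ */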
+
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5260_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON);
+}
+
+static int rts5260_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWERON);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ msleep(20);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ sd_set_sample_push_timing_sd30(pcr);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
+
+ return err;
+}
+
+static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_17);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5260_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+}
+
+static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rts5260_stop_cmd(pcr);
+ rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
+
+ if (option->ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+}
+
+static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5260_card_before_power_off(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWEROFF);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ return err;
+}
+
+static void rts5260_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_THD_MASK,
+ option->sd_400mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_THD_MASK,
+ RTS5260_DVIO_OCP_THD_350);
+
+ rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
+ RTS5260_DV331812_OCP_THD_MASK,
+ RTS5260_DV331812_OCP_THD_210);
+
+ mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN, 0);
+ }
+}
+
+static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+}
+
+static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+}
+
+int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val);
+}
+
+void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+ val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR);
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0);
+}
+
+void rts5260_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_sd_power_off_card3v3(pcr);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_ms_power_off_card3v3(pcr);
+
+ if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
+ rtsx_pci_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ pcr->ocp_stat2 = 0;
+ }
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ }
+}
+
+int rts5260_init_hw(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ rtsx_pci_init_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+	/* Reset L1SUB Config */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL,
+ CLK_PM_EN, CLK_PM_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ PWR_GATE_EN, PWR_GATE_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF,
+ PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL,
+ U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG,
+ OBFF_EN_MASK, OBFF_DISABLE);
+
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+{
+ int lss_l1_1, lss_l1_2;
+
+ lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_1_EN);
+ lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_2_EN);
+
+ if (lss_l1_2) {
+ pcr_dbg(pcr, "Set parameters for L1.2.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_2_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_2_PD_FE_EN);
+ } else if (lss_l1_1) {
+ pcr_dbg(pcr, "Set parameters for L1.1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_1_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_1_PD_FE_EN);
+ } else {
+ pcr_dbg(pcr, "Set parameters for L1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_0_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_0_PD_FE_EN);
+ }
+
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+	/* Option cut APHY */
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0,
+ 0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1,
+ 0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2,
+ 0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3,
+ 0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT);
+	/* CDR DEC */
+	rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT);
+	/* PWMPFM */
+	rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE,
+			0xFF, CFG_LP_FPWM_VALUE_DEFAULT);
+	/* No Power Saving WA */
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE,
+ 0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT);
+}
+
+static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 lval;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ rts5260_pwr_saving_setting(pcr);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+ rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+ rts5260_init_from_cfg(pcr);
+
+	/* force no MDIO */
+	rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4,
+			0xFF, RTS5260_MIMO_DISABLE);
+	/* Modify SDVCC Tune Default Parameters! */
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rts5260_init_hw(pcr);
+
+	/*
+	 * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+	 * low and the clock will be requested unconditionally.
+	 */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return 0;
+}
+
+void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5260_pcr_ops = {
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+ .turn_on_led = rts5260_turn_on_led,
+ .turn_off_led = rts5260_turn_off_led,
+ .extra_init_hw = rts5260_extra_init_hw,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rts5260_card_power_on,
+ .card_power_off = rts5260_card_power_off,
+ .switch_output_voltage = rts5260_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
+ .stop_cmd = rts5260_stop_cmd,
+ .set_aspm = rts5260_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5260_enable_ocp,
+ .disable_ocp = rts5260_disable_ocp,
+ .init_ocp = rts5260_init_ocp,
+ .process_ocp = rts5260_process_ocp,
+ .get_ocpstat = rts5260_get_ocpstat,
+ .clear_ocpstat = rts5260_clear_ocpstat,
+};
+
+void rts5260_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5260_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+
+ pcr->ops = &rts5260_pcr_ops;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+ option->ocp_en = 1;
+ if (option->ocp_en)
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+ option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
+ option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
+}
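
The new chip plugs into the driver's per-chip dispatch: the core keeps a struct pcr_ops pointer and falls back to generic register pokes when a hook is NULL (the rtsx_pcr.c hunks below show this for stop_cmd and the OCP helpers). As a hedged sketch of the pattern — the rts51xx names below are hypothetical, not part of this patch — another variant would wire in the same way:

/* Hypothetical chip variant; hooks left NULL fall back to the
 * generic paths in rtsx_pcr.c. */
static const struct pcr_ops rts51xx_pcr_ops = {
	.turn_on_led	= rts51xx_turn_on_led,		/* hypothetical */
	.card_power_on	= rts51xx_card_power_on,	/* hypothetical */
};

void rts51xx_init_params(struct rtsx_pcr *pcr)
{
	pcr->num_slots = 2;
	pcr->ops = &rts51xx_pcr_ops;	/* core dispatches through this */
}
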
diff --git a/drivers/misc/cardreader/rts5260.h b/drivers/misc/cardreader/rts5260.h
new file mode 100644
index 000000000000..53a1411c8868
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.h
@@ -0,0 +1,45 @@
+#ifndef __RTS5260_H__
+#define __RTS5260_H__
+
+#define RTS5260_DVCC_CTRL 0xFF73
+#define RTS5260_DVCC_OCP_EN (0x01 << 7)
+#define RTS5260_DVCC_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVCC_POWERON (0x01 << 3)
+#define RTS5260_DVCC_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DVIO_CTRL 0xFF75
+#define RTS5260_DVIO_OCP_EN (0x01 << 7)
+#define RTS5260_DVIO_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVIO_POWERON (0x01 << 3)
+#define RTS5260_DVIO_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DV331812_CFG 0xFF71
+#define RTS5260_DV331812_OCP_EN (0x01 << 7)
+#define RTS5260_DV331812_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DV331812_POWERON (0x01 << 3)
+#define RTS5260_DV331812_SEL (0x01 << 2)
+#define RTS5260_DV331812_VDD1 (0x01 << 2)
+#define RTS5260_DV331812_VDD2 (0x00 << 2)
+
+#define RTS5260_DV331812_OCP_THD_120 (0x00 << 4)
+#define RTS5260_DV331812_OCP_THD_140 (0x01 << 4)
+#define RTS5260_DV331812_OCP_THD_160 (0x02 << 4)
+#define RTS5260_DV331812_OCP_THD_180 (0x03 << 4)
+#define RTS5260_DV331812_OCP_THD_210 (0x04 << 4)
+#define RTS5260_DV331812_OCP_THD_240 (0x05 << 4)
+#define RTS5260_DV331812_OCP_THD_270 (0x06 << 4)
+#define RTS5260_DV331812_OCP_THD_300 (0x07 << 4)
+
+#define RTS5260_DVIO_OCP_THD_250 (0x00 << 4)
+#define RTS5260_DVIO_OCP_THD_300 (0x01 << 4)
+#define RTS5260_DVIO_OCP_THD_350 (0x02 << 4)
+#define RTS5260_DVIO_OCP_THD_400 (0x03 << 4)
+#define RTS5260_DVIO_OCP_THD_450 (0x04 << 4)
+#define RTS5260_DVIO_OCP_THD_500 (0x05 << 4)
+#define RTS5260_DVIO_OCP_THD_550 (0x06 << 4)
+#define RTS5260_DVIO_OCP_THD_600 (0x07 << 4)
+
+#define RTS5260_DVCC_OCP_THD_550 (0x00 << 4)
+#define RTS5260_DVCC_OCP_THD_970 (0x05 << 4)
+
+#endif
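
Each *_THD_* macro above encodes a 3-bit threshold field at bits 6:4, so every value lands inside its *_THD_MASK: for instance RTS5260_DVIO_OCP_THD_350 is 0x02 << 4 = 0x20 against a mask of 0x07 << 4 = 0x70. An illustrative masked write applies it without disturbing the neighbouring bits (the same call pattern rts5260_init_ocp() uses):

/* Only bits 6:4 of RTS5260_DVIO_CTRL change; the rest are preserved. */
rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
			RTS5260_DVIO_OCP_THD_MASK, RTS5260_DVIO_OCP_THD_350);
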
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index c3ed885c155c..fd09b0960097 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -29,7 +29,7 @@
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
@@ -62,6 +62,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -334,6 +335,9 @@ EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
+ if (pcr->ops->stop_cmd)
+ return pcr->ops->stop_cmd(pcr);
+
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
@@ -826,7 +830,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return err;
/* Wait SSC clock stable */
- udelay(10);
+ udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
@@ -963,6 +967,20 @@ static void rtsx_pci_card_detect(struct work_struct *work)
pcr->slots[RTSX_MS_CARD].p_dev);
}
+void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->process_ocp)
+ pcr->ops->process_ocp(pcr);
+}
+
+int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+{
+ if (pcr->option.ocp_en)
+ rtsx_pci_process_ocp(pcr);
+
+ return 0;
+}
+
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
struct rtsx_pcr *pcr = dev_id;
@@ -987,6 +1005,9 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg &= (pcr->bier | 0x7FFFFF);
+ if (int_reg & SD_OC_INT)
+ rtsx_pci_process_ocp_interrupt(pcr);
+
if (int_reg & SD_INT) {
if (int_reg & SD_EXIST) {
pcr->card_inserted |= SD_EXIST;
@@ -1119,6 +1140,102 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
}
#endif
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->enable_ocp)
+ pcr->ops->enable_ocp(pcr);
+ else
+		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->disable_ocp)
+ pcr->ops->disable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->init_ocp) {
+ pcr->ops->init_ocp(pcr);
+ } else {
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ if (option->ocp_en) {
+ u8 val = option->sd_400mA_ocp_thd;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+ rtsx_pci_write_register(pcr, REG_OCPPARA1,
+ SD_OCP_TIME_MASK, SD_OCP_TIME_800);
+ rtsx_pci_write_register(pcr, REG_OCPPARA2,
+ SD_OCP_THD_MASK, val);
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH,
+ SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ /* OC power down */
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+ }
+ }
+}
+
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ if (pcr->ops->get_ocpstat)
+ return pcr->ops->get_ocpstat(pcr, val);
+ else
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->clear_ocpstat) {
+ pcr->ops->clear_ocpstat(pcr);
+ } else {
+ u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ }
+}
+
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+
+ msleep(50);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+
+ return 0;
+}
+
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+
+ return 0;
+}
+
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
int err;
@@ -1189,6 +1306,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_5250:
case PID_524A:
case PID_525A:
+ case PID_5260:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1265,6 +1383,9 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+ case 0x5260:
+ rts5260_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index ec784e04fe20..6ea1655db0bb 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -22,7 +22,7 @@
#ifndef __RTSX_PCR_H
#define __RTSX_PCR_H
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#define MIN_DIV_N_PCR 80
#define MAX_DIV_N_PCR 208
@@ -44,6 +44,8 @@
#define ASPM_MASK_NEG 0xFC
#define MASK_8_BIT_DEF 0xFF
+#define SSC_CLOCK_STABLE_WAIT 130
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -57,6 +59,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr);
void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
+void rts5260_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -99,5 +102,12 @@ do { \
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59d61b04c197..b97903ff1a72 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -23,7 +23,7 @@
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
static int polling_pipe = 1;
module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 7c11bad5cded..753b1a698fc4 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -427,7 +427,7 @@ int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
return afu_poll(file, poll);
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index e46a4062904a..a798c2ccd67d 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -1081,7 +1081,7 @@ int afu_open(struct inode *inode, struct file *file);
int afu_release(struct inode *inode, struct file *file);
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int afu_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll);
ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
extern const struct file_operations afu_fops;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 76c0b0ca9388..90341ccda9bd 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -354,10 +354,10 @@ static inline bool ctx_event_pending(struct cxl_context *ctx)
return false;
}
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
- int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 512a4897dbf6..7fd0bdc1436a 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -54,7 +54,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
return false;
}
- set_dma_ops(&dev->dev, &dma_direct_ops);
+ set_dma_ops(&dev->dev, &dma_nommu_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
return _cxl_pci_associate_default_context(dev, afu);
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 097e3092c158..95ce3e891b1b 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -514,7 +514,7 @@ static ssize_t ilo_write(struct file *fp, const char __user *buf,
return err ? -EFAULT : len;
}
-static unsigned int ilo_poll(struct file *fp, poll_table *wait)
+static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
struct ccb_data *data = fp->private_data;
struct ccb *driver_ccb = &data->driver_ccb;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 8d53609861d8..e49888eab87d 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -651,7 +651,7 @@ out:
return retval;
}
-static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+static __poll_t lis3lv02d_misc_poll(struct file *file, poll_table *wait)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e825f013e54e..505b710291e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -542,12 +542,12 @@ static long mei_compat_ioctl(struct file *file,
*
* Return: poll mask
*/
-static unsigned int mei_poll(struct file *file, poll_table *wait)
+static __poll_t mei_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
bool notify_en;
if (WARN_ON(!cl || !cl->dev))
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index ddc9e4b08b5c..8a3e48ec37dd 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -1311,10 +1311,10 @@ static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
spin_lock(&ep->lock);
}
-unsigned int
+__poll_t
__scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev_dbg(scif_info.mdev.this_device,
"SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]);
@@ -1389,7 +1389,8 @@ scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs)
{
struct poll_wqueues table;
poll_table *pt;
- int i, mask, count = 0, timed_out = timeout_msecs == 0;
+ int i, count = 0, timed_out = timeout_msecs == 0;
+ __poll_t mask;
u64 timeout = timeout_msecs < 0 ? MAX_SCHEDULE_TIMEOUT
: msecs_to_jiffies(timeout_msecs);
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index 1771d7a9b8d0..f39b663da287 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -203,7 +203,7 @@ void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
int __scif_flush(scif_epd_t epd);
int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
-unsigned int __scif_pollfd(struct file *f, poll_table *wait,
+__poll_t __scif_pollfd(struct file *f, poll_table *wait,
struct scif_endpt *ep);
int __scif_pin_pages(void *addr, size_t len, int *out_prot,
int map_flags, scif_pinned_pages_t *pages);
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
index f7e826142a72..5c2a57ae4f85 100644
--- a/drivers/misc/mic/scif/scif_fd.c
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -41,7 +41,7 @@ static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
return scif_mmap(vma, priv);
}
-static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
+static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
{
struct scif_endpt *priv = f->private_data;
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index fed992e2c258..4120ed8f0cae 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -1023,10 +1023,10 @@ __unlock_ret:
* in the card->host (TX) path, when userspace is unblocked by poll it
* must drain all available descriptors or it can stall.
*/
-static unsigned int vop_poll(struct file *f, poll_table *wait)
+static __poll_t vop_poll(struct file *f, poll_table *wait)
{
struct vop_vdev *vdev = f->private_data;
- int mask = 0;
+ __poll_t mask = 0;
mutex_lock(&vdev->vdev_mutex);
if (vop_vdev_inited(vdev)) {
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 30754927fd80..8fa68cf308e0 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -256,10 +256,10 @@ static int phantom_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int phantom_poll(struct file *file, poll_table *wait)
+static __poll_t phantom_poll(struct file *file, poll_table *wait)
{
struct phantom_device *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
poll_wait(file, &dev->wait, wait);
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 8a16a26e9658..6640e7651533 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -166,11 +166,11 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
* This is used to wake up the VMX when a VMCI call arrives, or
* to wake up select() or poll() at the next clock tick.
*/
-static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
struct vmci_ctx *context = vmci_host_dev->context;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
/* Check for VMCI calls to this VM context. */
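
The poll conversions above (cxl, hpilo, lis3lv02d, mei, scif, vop, phantom, vmci) are one mechanical change: poll methods now return the sparse-checked bitwise type __poll_t instead of unsigned int, so only poll event bits can flow out. A minimal converted handler looks like this sketch (my_dev and its data_ready flag are hypothetical):

#include <linux/poll.h>

static __poll_t my_dev_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;	/* hypothetical device */
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);
	if (dev->data_ready)				/* hypothetical flag */
		mask |= POLLIN | POLLRDNORM;
	return mask;
}
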
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ccfa98af1dd3..20135a5de748 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -63,7 +63,13 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+/*
+ * Set a 10 second timeout for polling write request busy state. Note that the
+ * mmc core sets a 3 second timeout for SD cards, and SDHCI has long had a 10
+ * second software timer to time out the whole request, so 10 seconds should be
+ * ample.
+ */
+#define MMC_BLK_TIMEOUT_MS (10 * 1000)
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
@@ -112,6 +118,7 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_CQE_RECOVERY BIT(4)
/*
* Only set in main mmc_blk_data associated
@@ -189,7 +196,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
int devidx = mmc_get_devidx(md->disk);
- blk_cleanup_queue(md->queue.queue);
+ blk_put_queue(md->queue.queue);
ida_simple_remove(&mmc_blk_ida, devidx);
put_disk(md->disk);
kfree(md);
@@ -921,14 +928,54 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
return 0;
}
+static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
+{
+ if (host->actual_clock)
+ return host->actual_clock / 1000;
+
+ /* Clock may be subject to a divisor, fudge it by a factor of 2. */
+ if (host->ios.clock)
+ return host->ios.clock / 2000;
+
+	/* How can there be no clock? */
+ WARN_ON_ONCE(1);
+ return 100; /* 100 kHz is minimum possible value */
+}
+
+static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
+ struct mmc_data *data)
+{
+ unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
+ unsigned int khz;
+
+ if (data->timeout_clks) {
+ khz = mmc_blk_clock_khz(host);
+ ms += DIV_ROUND_UP(data->timeout_clks, khz);
+ }
+
+ return ms;
+}
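
To make the helper's arithmetic concrete (illustrative numbers, not taken from the patch): a card advertising timeout_ns = 100 ms and timeout_clks = 1000 on a 50 MHz actual clock gives:

/* khz = host->actual_clock / 1000       = 50000
 * ms  = DIV_ROUND_UP(100000000, 1000000) = 100
 * ms += DIV_ROUND_UP(1000, 50000)        = 100 + 1 = 101 ms total
 */
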
+
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- bool hw_busy_detect, struct request *req, bool *gen_err)
+ struct request *req, u32 *resp_errs)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
u32 status;
do {
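+		/*
+		 * Latch the deadline before polling status, so the card is
+		 * checked one final time after the timeout has elapsed.
+		 */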
+ bool done = time_after(jiffies, timeout);
+
err = __mmc_send_status(card, &status, 5);
if (err) {
pr_err("%s: error %d requesting status\n",
@@ -936,25 +983,18 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
return err;
}
- if (status & R1_ERROR) {
- pr_err("%s: %s: error sending status cmd, status %#x\n",
- req->rq_disk->disk_name, __func__, status);
- *gen_err = true;
- }
-
- /* We may rely on the host hw to handle busy detection.*/
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
- hw_busy_detect)
- break;
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
/*
* Timeout if the device never becomes ready for data and never
* leaves the program state.
*/
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state! %s %s\n",
+ if (done) {
+ pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__);
+ req->rq_disk->disk_name, __func__, status);
return -ETIMEDOUT;
}
@@ -963,229 +1003,11 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
* so make sure to check both the busy
* indication and the card state.
*/
- } while (!(status & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ } while (!mmc_blk_in_tran_state(status));
return err;
}
-static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, bool *gen_err, u32 *stop_status)
-{
- struct mmc_host *host = card->host;
- struct mmc_command cmd = {};
- int err;
- bool use_r1b_resp = rq_data_dir(req) == WRITE;
-
- /*
- * Normally we use R1B responses for WRITE, but in cases where the host
- * has specified a max_busy_timeout we need to validate it. A failure
- * means we need to prevent the host from doing hw busy detection, which
- * is done by converting to a R1 response instead.
- */
- if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
- cmd.opcode = MMC_STOP_TRANSMISSION;
- if (use_r1b_resp) {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- }
-
- err = mmc_wait_for_cmd(host, &cmd, 5);
- if (err)
- return err;
-
- *stop_status = cmd.resp[0];
-
- /* No need to check card status in case of READ. */
- if (rq_data_dir(req) == READ)
- return 0;
-
- if (!mmc_host_is_spi(host) &&
- (*stop_status & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop command, resp %#x\n",
- req->rq_disk->disk_name, __func__, *stop_status);
- *gen_err = true;
- }
-
- return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
-}
-
-#define ERR_NOMEDIUM 3
-#define ERR_RETRY 2
-#define ERR_ABORT 1
-#define ERR_CONTINUE 0
-
-static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
- bool status_valid, u32 status)
-{
- switch (error) {
- case -EILSEQ:
- /* response crc error, retry the r/w cmd */
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "response CRC error",
- name, status);
- return ERR_RETRY;
-
- case -ETIMEDOUT:
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "timed out", name, status);
-
- /* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid) {
- pr_err("%s: status not valid, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /*
- * If it was a r/w cmd crc error, or illegal command
- * (eg, issued in wrong state) then retry - we should
- * have corrected the state problem above.
- */
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
- pr_err("%s: command error, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /* Otherwise abort the command */
- return ERR_ABORT;
-
- default:
- /* We don't understand the error code the driver gave us */
- pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
- req->rq_disk->disk_name, error, status);
- return ERR_ABORT;
- }
-}
-
-/*
- * Initial r/w and stop cmd error recovery.
- * We don't know whether the card received the r/w cmd or not, so try to
- * restore things back to a sane state. Essentially, we do this as follows:
- * - Obtain card status. If the first attempt to obtain card status fails,
- * the status word will reflect the failed status cmd, not the failed
- * r/w cmd. If we fail to obtain card status, it suggests we can no
- * longer communicate with the card.
- * - Check the card state. If the card received the cmd but there was a
- * transient problem with the response, it might still be in a data transfer
- * mode. Try to send it a stop command. If this fails, we can't recover.
- * - If the r/w cmd failed due to a response CRC error, it was probably
- * transient, so retry the cmd.
- * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
- * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
- * illegal cmd, retry.
- * Otherwise we don't understand what happened, so abort.
- */
-static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
-{
- bool prev_cmd_status_valid = true;
- u32 status, stop_status = 0;
- int err, retry;
-
- if (mmc_card_removed(card))
- return ERR_NOMEDIUM;
-
- /*
- * Try to get card status which indicates both the card state
- * and why there was no response. If the first attempt fails,
- * we can't be sure the returned status is for the r/w command.
- */
- for (retry = 2; retry >= 0; retry--) {
- err = __mmc_send_status(card, &status, 0);
- if (!err)
- break;
-
- /* Re-tune if needed */
- mmc_retune_recheck(card->host);
-
- prev_cmd_status_valid = false;
- pr_err("%s: error %d sending status command, %sing\n",
- req->rq_disk->disk_name, err, retry ? "retry" : "abort");
- }
-
- /* We couldn't get a response from the card. Give up. */
- if (err) {
- /* Check if the card is removed */
- if (mmc_detect_card_removed(card->host))
- return ERR_NOMEDIUM;
- return ERR_ABORT;
- }
-
- /* Flag ECC errors */
- if ((status & R1_CARD_ECC_FAILED) ||
- (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
- (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
- *ecc_err = true;
-
- /* Flag General errors */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
- if ((status & R1_ERROR) ||
- (brq->stop.resp[0] & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0], status);
- *gen_err = true;
- }
-
- /*
- * Check the current card state. If it is in some data transfer
- * mode, tell it to stop (and hopefully transition back to TRAN.)
- */
- if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
- R1_CURRENT_STATE(status) == R1_STATE_RCV) {
- err = send_stop(card,
- DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
- req, gen_err, &stop_status);
- if (err) {
- pr_err("%s: error %d sending stop command\n",
- req->rq_disk->disk_name, err);
- /*
- * If the stop cmd also timed out, the card is probably
- * not present, so abort. Other errors are bad news too.
- */
- return ERR_ABORT;
- }
-
- if (stop_status & R1_CARD_ECC_FAILED)
- *ecc_err = true;
- }
-
- /* Check for set block count errors */
- if (brq->sbc.error)
- return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
- prev_cmd_status_valid, status);
-
- /* Check for r/w command errors */
- if (brq->cmd.error)
- return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
- prev_cmd_status_valid, status);
-
- /* Data errors */
- if (!brq->stop.error)
- return ERR_CONTINUE;
-
- /* Now for stop errors. These aren't fatal to the transfer. */
- pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->stop.error,
- brq->cmd.resp[0], status);
-
- /*
- * Subsitute in our own stop status as this will give the error
- * state which happened during the execution of the r/w command.
- */
- if (stop_status) {
- brq->stop.resp[0] = stop_status;
- brq->stop.error = 0;
- }
- return ERR_CONTINUE;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1281,7 +1103,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
mq_rq->drv_op_result = ret;
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1324,7 +1146,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
else
mmc_blk_reset_success(md, type);
fail:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1394,7 +1216,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1404,7 +1226,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
int ret = 0;
ret = mmc_flush_cache(card);
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
@@ -1430,15 +1252,18 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-#define CMD_ERRORS \
- (R1_OUT_OF_RANGE | /* Command argument out of range */ \
- R1_ADDRESS_ERROR | /* Misaligned address */ \
+#define CMD_ERRORS_EXCL_OOR \
+ (R1_ADDRESS_ERROR | /* Misaligned address */ \
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
R1_WP_VIOLATION | /* Tried to write to protected block */ \
R1_CARD_ECC_FAILED | /* Card ECC failed */ \
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
+#define CMD_ERRORS \
+ (CMD_ERRORS_EXCL_OOR | \
+	 R1_OUT_OF_RANGE)	/* Command argument out of range */
+
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
u32 val;
@@ -1481,116 +1306,6 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
}
-static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
- areq);
- struct mmc_blk_request *brq = &mq_mrq->brq;
- struct request *req = mmc_queue_req_to_req(mq_mrq);
- int need_retune = card->host->need_retune;
- bool ecc_err = false;
- bool gen_err = false;
-
- /*
- * sbc.error indicates a problem with the set block count
- * command. No data will have been transferred.
- *
- * cmd.error indicates a problem with the r/w command. No
- * data will have been transferred.
- *
- * stop.error indicates a problem with the stop command. Data
- * may have been transferred, or may still be transferring.
- */
-
- mmc_blk_eval_resp_error(brq);
-
- if (brq->sbc.error || brq->cmd.error ||
- brq->stop.error || brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
- case ERR_RETRY:
- return MMC_BLK_RETRY;
- case ERR_ABORT:
- return MMC_BLK_ABORT;
- case ERR_NOMEDIUM:
- return MMC_BLK_NOMEDIUM;
- case ERR_CONTINUE:
- break;
- }
- }
-
- /*
- * Check for errors relating to the execution of the
- * initial command - such as address errors. No data
- * has been transferred.
- */
- if (brq->cmd.resp[0] & CMD_ERRORS) {
- pr_err("%s: r/w command failed, status = %#x\n",
- req->rq_disk->disk_name, brq->cmd.resp[0]);
- return MMC_BLK_ABORT;
- }
-
- /*
- * Everything else is either success, or a data error of some
- * kind. If it was a write, we may have transitioned to
- * program mode, which we have to wait for it to complete.
- */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- int err;
-
- /* Check stop command response */
- if (brq->stop.resp[0] & R1_ERROR) {
- pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0]);
- gen_err = true;
- }
-
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
- &gen_err);
- if (err)
- return MMC_BLK_CMD_ERR;
- }
-
- /* if general error occurs, retry the write operation. */
- if (gen_err) {
- pr_warn("%s: retrying write for general error\n",
- req->rq_disk->disk_name);
- return MMC_BLK_RETRY;
- }
-
- /* Some errors (ECC) are flagged on the next commmand, so check stop, too */
- if (brq->data.error || brq->stop.error) {
- if (need_retune && !brq->retune_retry_done) {
- pr_debug("%s: retrying because a re-tune was needed\n",
- req->rq_disk->disk_name);
- brq->retune_retry_done = 1;
- return MMC_BLK_RETRY;
- }
- pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req),
- brq->cmd.resp[0], brq->stop.resp[0]);
-
- if (rq_data_dir(req) == READ) {
- if (ecc_err)
- return MMC_BLK_ECC_ERR;
- return MMC_BLK_DATA_ERR;
- } else {
- return MMC_BLK_CMD_ERR;
- }
- }
-
- if (!brq->data.bytes_xfered)
- return MMC_BLK_RETRY;
-
- if (blk_rq_bytes(req) != brq->data.bytes_xfered)
- return MMC_BLK_PARTIAL;
-
- return MMC_BLK_SUCCESS;
-}
-
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
int disable_multi, bool *do_rel_wr_p,
bool *do_data_tag_p)
@@ -1706,8 +1421,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.sg_len = i;
}
- mqrq->areq.mrq = &brq->mrq;
-
if (do_rel_wr_p)
*do_rel_wr_p = do_rel_wr;
@@ -1715,6 +1428,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
*do_data_tag_p = do_data_tag;
}
+#define MMC_CQE_RETRIES 2
+
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+ bool put_card;
+ int err;
+
+ mmc_cqe_post_req(host, mrq);
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+ else
+ err = 0;
+
+ if (err) {
+ if (mqrq->retries++ < MMC_CQE_RETRIES)
+ blk_mq_requeue_request(req, true);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mrq->data) {
+ if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ mmc_cqe_check_busy(mq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!mq->cqe_busy)
+ blk_mq_run_hw_queues(q, true);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+ else
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+ * Block layer timeouts race with completions which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_blk_cqe_req_done;
+ mrq->recovery_notifier = mmc_cqe_recovery_notifier;
+
+ return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+ struct request *req)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+
+ memset(brq, 0, sizeof(*brq));
+
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.tag = req->tag;
+
+ return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+ mrq->cmd->opcode = MMC_SWITCH;
+ mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_FLUSH_CACHE << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+ return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
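
The flush dcmd above packs the standard CMD6 (MMC_SWITCH) argument; decoded, the fields assembled by those shifts are:

/* CMD6 argument layout as built in mmc_blk_cqe_issue_flush():
 *   bits 25:24  access mode   = MMC_SWITCH_MODE_WRITE_BYTE
 *   bits 23:16  EXT_CSD index = EXT_CSD_FLUSH_CACHE
 *   bits 15:8   value written = 1 (trigger the cache flush)
 *   bits 2:0    command set   = EXT_CSD_CMD_SET_NORMAL
 */
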
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+ return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1779,318 +1624,637 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
+}
+
+#define MMC_MAX_RETRIES 5
+#define MMC_DATA_RETRIES 2
+#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
- mqrq->areq.err_check = mmc_blk_err_check;
+static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
+{
+ struct mmc_command cmd = {
+ .opcode = MMC_STOP_TRANSMISSION,
+ .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
+ /* Some hosts wait for busy anyway, so provide a busy timeout */
+ .busy_timeout = timeout,
+ };
+
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
}
-static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
- struct mmc_blk_request *brq, struct request *req,
- bool old_req_pending)
+static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
- bool req_pending;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
+ int err;
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
- * If the card is not SD, we can still ok written sectors
- * as reported by the controller (which might be less than
- * the real number of written sectors, but never more).
- */
- if (mmc_card_sd(card)) {
- u32 blocks;
+ mmc_retune_hold_now(card->host);
+
+ mmc_blk_send_stop(card, timeout);
+
+ err = card_busy_detect(card, timeout, req, NULL);
+
+ mmc_retune_release(card->host);
+
+ return err;
+}
+
+#define MMC_READ_SINGLE_RETRIES 2
+
+/* Single sector read during recovery */
+static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ blk_status_t error = BLK_STS_OK;
+ int retries = 0;
+
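+	/* Transfer and complete one 512-byte sector per iteration */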
+ do {
+ u32 status;
int err;
- err = mmc_sd_num_wr_blocks(card, &blocks);
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+
+ mmc_wait_for_req(host, mrq);
+
+ err = mmc_send_status(card, &status);
if (err)
- req_pending = old_req_pending;
+ goto error_exit;
+
+ if (!mmc_host_is_spi(host) &&
+ !mmc_blk_in_tran_state(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
+
+ if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
+ continue;
+
+ retries = 0;
+
+ if (mrq->cmd->error ||
+ mrq->data->error ||
+ (!mmc_host_is_spi(host) &&
+ (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
+ error = BLK_STS_IOERR;
else
- req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
- } else {
- req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
- }
- return req_pending;
+ error = BLK_STS_OK;
+
+ } while (blk_update_request(req, error, 512));
+
+ return;
+
+error_exit:
+ mrq->data->bytes_xfered = 0;
+ blk_update_request(req, BLK_STS_IOERR, 512);
+ /* Let it try the remaining request again */
+ if (mqrq->retries > MMC_MAX_RETRIES - 1)
+ mqrq->retries = MMC_MAX_RETRIES - 1;
}
-static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
- struct request *req,
- struct mmc_queue_req *mqrq)
+static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
- if (mmc_card_removed(card))
- req->rq_flags |= RQF_QUIET;
- while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
- mq->qcnt--;
+ return !!brq->mrq.sbc;
}
-/**
- * mmc_blk_rw_try_restart() - tries to restart the current async request
- * @mq: the queue with the card and host to restart
- * @req: a new request that want to be started after the current one
+static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
+{
+ return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
+}
+
+/*
+ * Check for errors the host controller driver might not have seen, such as
+ * response mode errors or invalid card state.
+ */
+static bool mmc_blk_status_error(struct request *req, u32 status)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_queue *mq = req->q->queuedata;
+ u32 stop_err_bits;
+
+ if (mmc_host_is_spi(mq->card->host))
+ return false;
+
+ stop_err_bits = mmc_blk_stop_err_bits(brq);
+
+ return brq->cmd.resp[0] & CMD_ERRORS ||
+ brq->stop.resp[0] & stop_err_bits ||
+ status & stop_err_bits ||
+ (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+}
+
+static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
+{
+ return !brq->sbc.error && !brq->cmd.error &&
+ !(brq->cmd.resp[0] & CMD_ERRORS);
+}
+
+/*
+ * Requests are completed by mmc_blk_mq_complete_rq(), which applies a
+ * simple policy:
+ * 1. A request that has transferred at least some data is considered
+ * successful and will be requeued if there is remaining data to
+ * transfer.
+ * 2. Otherwise the number of retries is incremented and the request
+ * will be requeued if there are remaining retries.
+ * 3. Otherwise the request will be errored out.
+ * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
+ * mqrq->retries. So there are only 4 possible actions here:
+ * 1. do not accept the bytes_xfered value i.e. set it to zero
+ * 2. change mqrq->retries to determine the number of retries
+ * 3. try to reset the card
+ * 4. read one sector at a time
*/
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
- struct mmc_queue_req *mqrq)
+static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
- if (!req)
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = mq->card;
+ u32 status;
+ u32 blocks;
+ int err;
+
+ /*
+	 * Query the card status for errors the host driver might not have seen.
+	 * If any are found, set the number of bytes transferred to zero.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err || mmc_blk_status_error(req, status))
+ brq->data.bytes_xfered = 0;
+
+ mmc_retune_release(card->host);
+
+ /*
+ * Try again to get the status. This also provides an opportunity for
+ * re-tuning.
+ */
+ if (err)
+ err = __mmc_send_status(card, &status, 0);
+
+ /*
+	 * If the card has been removed, there is nothing more to do once the
+	 * number of bytes transferred has been updated.
+ */
+ if (err && mmc_detect_card_removed(card->host))
return;
+ /* Try to get back to "tran" state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ (err || !mmc_blk_in_tran_state(status)))
+ err = mmc_blk_fix_state(mq->card, req);
+
/*
- * If the card was removed, just cancel everything and return.
+ * Special case for SD cards where the card might record the number of
+ * blocks written.
*/
- if (mmc_card_removed(mq->card)) {
- req->rq_flags |= RQF_QUIET;
- blk_end_request_all(req, BLK_STS_IOERR);
- mq->qcnt--; /* FIXME: just set to 0? */
+ if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
+ rq_data_dir(req) == WRITE) {
+ if (mmc_sd_num_wr_blocks(card, &blocks))
+ brq->data.bytes_xfered = 0;
+ else
+ brq->data.bytes_xfered = blocks << 9;
+ }
+
+ /* Reset if the card is in a bad state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ err && mmc_blk_reset(md, card->host, type)) {
+ pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ mqrq->retries = MMC_NO_RETRIES;
+ return;
+ }
+
+ /*
+	 * If any data was transferred, just return; anything remaining on the
+	 * request will be requeued.
+ */
+ if (brq->data.bytes_xfered)
+ return;
+
+ /* Reset before last retry */
+ if (mqrq->retries + 1 == MMC_MAX_RETRIES)
+ mmc_blk_reset(md, card->host, type);
+
+ /* Command errors fail fast, so use all MMC_MAX_RETRIES */
+ if (brq->sbc.error || brq->cmd.error)
+ return;
+
+ /* Reduce the remaining retries for data errors */
+ if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
+ mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
+ return;
+ }
+
+ /* FIXME: Missing single sector read for large sector size */
+ if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
+ brq->data.blocks > 1) {
+ /* Read one sector at a time */
+ mmc_blk_read_single(mq, req);
return;
}
- /* Else proceed and try to restart the current async request */
- mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
- mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
- struct mmc_blk_data *md = mq->blkdata;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request *brq;
- int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
- enum mmc_blk_status status;
- struct mmc_queue_req *mqrq_cur = NULL;
- struct mmc_queue_req *mq_rq;
- struct request *old_req;
- struct mmc_async_req *new_areq;
- struct mmc_async_req *old_areq;
- bool req_pending = true;
+ mmc_blk_eval_resp_error(brq);
+
+ return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ u32 status = 0;
+ int err;
+
+ if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ return 0;
+
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+
+ /*
+	 * Do not assume the data was transferred correctly if any error bits
+	 * are set.
+ */
+ if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ mqrq->brq.data.bytes_xfered = 0;
+ err = err ? err : -EIO;
+ }
+
+ /* Copy the exception bit so it will be seen later on */
+ if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
+
+ return err;
+}
- if (new_req) {
- mqrq_cur = req_to_mmc_queue_req(new_req);
- mq->qcnt++;
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+ struct request *req)
+{
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+ mmc_blk_reset_success(mq->blkdata, type);
+}
+
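+/*
+ * Complete a request according to the policy described above
+ * mmc_blk_mq_rw_recovery().
+ */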
+static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
+
+ if (nr_bytes) {
+ if (blk_update_request(req, BLK_STS_OK, nr_bytes))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else if (!blk_rq_bytes(req)) {
+ __blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+ blk_mq_requeue_request(req, true);
+ } else {
+ if (mmc_card_removed(mq->card))
+ req->rq_flags |= RQF_QUIET;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
+
+static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
+ (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
+ mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
+}
+
+static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ if (mmc_blk_urgent_bkops_needed(mq, mqrq))
+ mmc_start_bkops(mq->card, true);
+}
+
+void mmc_blk_mq_complete(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ mmc_blk_mq_complete_rq(mq, req);
+}
+
+static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_card_busy(mq->card, req)) {
+ mmc_blk_mq_rw_recovery(mq, req);
+ } else {
+ mmc_blk_rw_reset_success(mq, req);
+ mmc_retune_release(host);
+ }
+
+ mmc_blk_urgent_bkops(mq, mqrq);
+}
+
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+
+ mmc_post_req(host, mrq, 0);
+
+ /*
+	 * Block layer timeouts race with completions, which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_mq_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+
+ mmc_blk_mq_dec_in_flight(mq, req);
+}
+
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+ struct request *req = mq->recovery_req;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mq->recovery_req = NULL;
+ mq->rw_wait = false;
+
+ if (mmc_blk_rq_error(&mqrq->brq)) {
+ mmc_retune_hold_now(host);
+ mmc_blk_mq_rw_recovery(mq, req);
}
- if (!mq->qcnt)
+ mmc_blk_urgent_bkops(mq, mqrq);
+
+ mmc_blk_mq_post_req(mq, req);
+}
+
+static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
+ struct request **prev_req)
+{
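+	/*
+	 * Hosts with MMC_CAP_DONE_COMPLETE finish requests in their ->done()
+	 * callback, so there is never a deferred request to complete here.
+	 */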
+ if (mmc_host_done_complete(mq->card->host))
return;
- do {
- if (new_req) {
- /*
- * When 4KB native sector is enabled, only 8 blocks
- * multiple read or write is allowed
- */
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
- pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- new_req->rq_disk->disk_name);
- mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
- return;
- }
+ mutex_lock(&mq->complete_lock);
- mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
- new_areq = &mqrq_cur->areq;
- } else
- new_areq = NULL;
+ if (!mq->complete_req)
+ goto out_unlock;
- old_areq = mmc_start_areq(card->host, new_areq, &status);
- if (!old_areq) {
- /*
- * We have just put the first request into the pipeline
- * and there is nothing more to do until it is
- * complete.
- */
- return;
- }
+ mmc_blk_mq_poll_completion(mq, mq->complete_req);
+
+ if (prev_req)
+ *prev_req = mq->complete_req;
+ else
+ mmc_blk_mq_post_req(mq, mq->complete_req);
+
+ mq->complete_req = NULL;
+
+out_unlock:
+ mutex_unlock(&mq->complete_lock);
+}
+
+void mmc_blk_mq_complete_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ complete_work);
+
+ mmc_blk_mq_complete_prev_req(mq, NULL);
+}
+
+static void mmc_blk_mq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (!mmc_host_done_complete(host)) {
+ bool waiting;
/*
- * An asynchronous request has been completed and we proceed
- * to handle the result of it.
+ * We cannot complete the request in this context, so record
+ * that there is a request to complete, and that a following
+ * request does not need to wait (although it does need to
+ * complete complete_req first).
*/
- mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
- brq = &mq_rq->brq;
- old_req = mmc_queue_req_to_req(mq_rq);
- type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-
- switch (status) {
- case MMC_BLK_SUCCESS:
- case MMC_BLK_PARTIAL:
- /*
- * A block was successfully transferred.
- */
- mmc_blk_reset_success(md, type);
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->complete_req = req;
+ mq->rw_wait = false;
+ waiting = mq->waiting;
+ spin_unlock_irqrestore(q->queue_lock, flags);
- req_pending = blk_end_request(old_req, BLK_STS_OK,
- brq->data.bytes_xfered);
- /*
- * If the blk_end_request function returns non-zero even
- * though all data has been transferred and no errors
- * were returned by the host controller, it's a bug.
- */
- if (status == MMC_BLK_SUCCESS && req_pending) {
- pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(old_req),
- brq->data.bytes_xfered);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- return;
- }
- break;
- case MMC_BLK_CMD_ERR:
- req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
- if (mmc_blk_reset(md, card->host, type)) {
- if (req_pending)
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- else
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_RETRY:
- retune_retry_done = brq->retune_retry_done;
- if (retry++ < 5)
- break;
- /* Fall through */
- case MMC_BLK_ABORT:
- if (!mmc_blk_reset(md, card->host, type))
- break;
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- case MMC_BLK_DATA_ERR: {
- int err;
-
- err = mmc_blk_reset(md, card->host, type);
- if (!err)
- break;
- if (err == -ENODEV) {
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- /* Fall through */
- }
- case MMC_BLK_ECC_ERR:
- if (brq->data.blocks > 1) {
- /* Redo read one sector at a time */
- pr_warn("%s: retrying using single block read\n",
- old_req->rq_disk->disk_name);
- disable_multi = 1;
- break;
- }
- /*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
- */
- req_pending = blk_end_request(old_req, BLK_STS_IOERR,
- brq->data.blksz);
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_NOMEDIUM:
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- default:
- pr_err("%s: Unhandled return value (%d)",
- old_req->rq_disk->disk_name, status);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
+ /*
+	 * If 'waiting', the waiting task will complete this
+	 * request; otherwise queue work to do it. Note that
+ * complete_work may still race with the dispatch of a following
+ * request.
+ */
+ if (waiting)
+ wake_up(&mq->wait);
+ else
+ kblockd_schedule_work(&mq->complete_work);
- if (req_pending) {
- /*
- * In case of a incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_areq(card->host,
- &mq_rq->areq, NULL);
- mq_rq->brq.retune_retry_done = retune_retry_done;
- }
- } while (req_pending);
+ return;
+ }
+
+ /* Take the recovery path for errors or urgent background operations */
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ wake_up(&mq->wait);
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
- mq->qcnt--;
+ mq->rw_wait = false;
+ wake_up(&mq->wait);
+
+ mmc_blk_mq_post_req(mq, req);
}
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+ bool done;
+
+ /*
+ * Wait while there is another request in progress, but not if recovery
+ * is needed. Also indicate whether there is a request waiting to start.
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (mq->recovery_needed) {
+ *err = -EBUSY;
+ done = true;
+ } else {
+ done = !mq->rw_wait;
+ }
+ mq->waiting = !done;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return done;
+}
+
+static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
+{
+ int err = 0;
+
+ wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
+
+ /* Always complete the previous request if there is one */
+ mmc_blk_mq_complete_prev_req(mq, prev_req);
+
+ return err;
+}
+
+static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ struct request *prev_req = NULL;
+ int err = 0;
+
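+	/*
+	 * Prepare this request up front so host preparation (e.g. DMA mapping)
+	 * can overlap with the previous request still in flight.
+	 */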
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+
+ mqrq->brq.mrq.done = mmc_blk_mq_req_done;
+
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_blk_rw_wait(mq, &prev_req);
+ if (err)
+ goto out_post_req;
+
+ mq->rw_wait = true;
+
+ err = mmc_start_request(host, &mqrq->brq.mrq);
+
+ if (prev_req)
+ mmc_blk_mq_post_req(mq, prev_req);
+
+ if (err)
+ mq->rw_wait = false;
+
+ /* Release re-tuning here where there is no synchronization required */
+ if (err || mmc_host_done_complete(host))
+ mmc_retune_release(host);
+
+out_post_req:
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
+
+static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
+{
+ if (mq->use_cqe)
+ return host->cqe_ops->cqe_wait_for_idle(host);
+
+ return mmc_blk_rw_wait(mq, NULL);
+}
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
- int ret;
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
-
- if (req && !mq->qcnt)
- /* claim host only for the first request */
- mmc_get_card(card, NULL);
+ struct mmc_host *host = card->host;
+ int ret;
ret = mmc_blk_part_switch(card, md->part_type);
- if (ret) {
- if (req) {
- blk_end_request_all(req, BLK_STS_IOERR);
- }
- goto out;
- }
+ if (ret)
+ return MMC_REQ_FAILED_TO_START;
- if (req) {
+ switch (mmc_issue_type(mq, req)) {
+ case MMC_ISSUE_SYNC:
+ ret = mmc_blk_wait_for_idle(mq, host);
+ if (ret)
+ return MMC_REQ_BUSY;
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- /*
- * Complete ongoing async transfer before issuing
- * ioctl()s
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_drv_op(mq, req);
break;
case REQ_OP_DISCARD:
- /*
- * Complete ongoing async transfer before issuing
- * discard.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_discard_rq(mq, req);
break;
case REQ_OP_SECURE_ERASE:
- /*
- * Complete ongoing async transfer before issuing
- * secure erase.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_secdiscard_rq(mq, req);
break;
case REQ_OP_FLUSH:
- /*
- * Complete ongoing async transfer before issuing
- * flush.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_flush(mq, req);
break;
default:
- /* Normal request, just issue it */
- mmc_blk_issue_rw_rq(mq, req);
- card->host->context_info.is_waiting_last_req = false;
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+ return MMC_REQ_FINISHED;
+ case MMC_ISSUE_DCMD:
+ case MMC_ISSUE_ASYNC:
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ ret = mmc_blk_cqe_issue_flush(mq, req);
break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ if (mq->use_cqe)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+ ret = mmc_blk_mq_issue_rw_rq(mq, req);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
}
- } else {
- /* No request, flushing the pipeline with NULL */
- mmc_blk_issue_rw_rq(mq, NULL);
- card->host->context_info.is_waiting_last_req = false;
+ if (!ret)
+ return MMC_REQ_STARTED;
+ return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
}
-
-out:
- if (!mq->qcnt)
- mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2156,6 +2320,18 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->queue.blkdata = md;
+ /*
+ * Keep an extra reference to the queue so that we can shutdown the
+ * queue (i.e. call blk_cleanup_queue()) while there are still
+ * references to the 'md'. The corresponding blk_put_queue() is in
+ * mmc_blk_put().
+ */
+ if (!blk_get_queue(md->queue.queue)) {
+ mmc_cleanup_queue(&md->queue);
+ ret = -ENODEV;
+ goto err_putdisk;
+ }
+
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
@@ -2471,10 +2647,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
- spin_lock_irq(md->queue.queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
- spin_unlock_irq(md->queue.queue->queue_lock);
- blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
@@ -2623,6 +2795,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
if (n != EXT_CSD_STR_LEN) {
err = -EINVAL;
+ kfree(ext_csd);
goto out_free;
}
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 5946636101ef..31153f656f41 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -5,6 +5,16 @@
struct mmc_queue;
struct request;
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_cqe_recovery(struct mmc_queue *mq);
+
+enum mmc_issued;
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_mq_complete(struct request *req);
+void mmc_blk_mq_recovery(struct mmc_queue *mq);
+
+struct work_struct;
+
+void mmc_blk_mq_complete_work(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7586ff2ad1f1..fc92c6c1c9a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -351,8 +351,6 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
- mmc_init_context_info(card->host);
-
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1f0f44f4dd5f..c0ba6d8823b7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -341,6 +341,8 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ init_completion(&mrq->cmd_completion);
+
mmc_retune_hold(host);
if (mmc_card_removed(host->card))
@@ -361,20 +363,6 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_start_request);
-/*
- * mmc_wait_data_done() - done callback for data request
- * @mrq: done data request
- *
- * Wakes up mmc context, passed as a callback to host controller driver
- */
-static void mmc_wait_data_done(struct mmc_request *mrq)
-{
- struct mmc_context_info *context_info = &mrq->host->context_info;
-
- context_info->is_done_rcv = true;
- wake_up_interruptible(&context_info->wait);
-}
-
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
@@ -392,37 +380,6 @@ static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
wait_for_completion(&ongoing_mrq->cmd_completion);
}
-/*
- *__mmc_start_data_req() - starts data request
- * @host: MMC host to start the request
- * @mrq: data request to start
- *
- * Sets the done callback to be called when request is completed by the card.
- * Starts data mmc request execution
- * If an ongoing transfer is already in progress, wait for the command line
- * to become available before sending another command.
- */
-static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- int err;
-
- mmc_wait_ongoing_tfr_cmd(host);
-
- mrq->done = mmc_wait_data_done;
- mrq->host = host;
-
- init_completion(&mrq->cmd_completion);
-
- err = mmc_start_request(host, mrq);
- if (err) {
- mrq->cmd->error = err;
- mmc_complete_cmd(mrq);
- mmc_wait_data_done(mrq);
- }
-
- return err;
-}
-
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -432,8 +389,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
- init_completion(&mrq->cmd_completion);
-
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
@@ -650,164 +605,11 @@ EXPORT_SYMBOL(mmc_cqe_recovery);
*/
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
- if (host->areq)
- return host->context_info.is_done_rcv;
- else
- return completion_done(&mrq->completion);
+ return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
/**
- * mmc_pre_req - Prepare for a new request
- * @host: MMC host to prepare command
- * @mrq: MMC request to prepare for
- *
- * mmc_pre_req() is called in prior to mmc_start_req() to let
- * host prepare for the new request. Preparation of a request may be
- * performed while another request is running on the host.
- */
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- if (host->ops->pre_req)
- host->ops->pre_req(host, mrq);
-}
-
-/**
- * mmc_post_req - Post process a completed request
- * @host: MMC host to post process command
- * @mrq: MMC request to post process for
- * @err: Error, if non zero, clean up any resources made in pre_req
- *
- * Let the host post process a completed request. Post processing of
- * a request may be performed while another reuqest is running.
- */
-static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
- int err)
-{
- if (host->ops->post_req)
- host->ops->post_req(host, mrq, err);
-}
-
-/**
- * mmc_finalize_areq() - finalize an asynchronous request
- * @host: MMC host to finalize any ongoing request on
- *
- * Returns the status of the ongoing asynchronous request, but
- * MMC_BLK_SUCCESS if no request was going on.
- */
-static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
-{
- struct mmc_context_info *context_info = &host->context_info;
- enum mmc_blk_status status;
-
- if (!host->areq)
- return MMC_BLK_SUCCESS;
-
- while (1) {
- wait_event_interruptible(context_info->wait,
- (context_info->is_done_rcv ||
- context_info->is_new_req));
-
- if (context_info->is_done_rcv) {
- struct mmc_command *cmd;
-
- context_info->is_done_rcv = false;
- cmd = host->areq->mrq->cmd;
-
- if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card)) {
- status = host->areq->err_check(host->card,
- host->areq);
- break; /* return status */
- } else {
- mmc_retune_recheck(host);
- pr_info("%s: req failed (CMD%u): %d, retrying...\n",
- mmc_hostname(host),
- cmd->opcode, cmd->error);
- cmd->retries--;
- cmd->error = 0;
- __mmc_start_request(host, host->areq->mrq);
- continue; /* wait for done/new event again */
- }
- }
-
- return MMC_BLK_NEW_REQUEST;
- }
-
- mmc_retune_release(host);
-
- /*
- * Check BKOPS urgency for each R1 response
- */
- if (host->card && mmc_card_mmc(host->card) &&
- ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
- (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
- (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
- mmc_start_bkops(host->card, true);
- }
-
- return status;
-}
-
-/**
- * mmc_start_areq - start an asynchronous request
- * @host: MMC host to start command
- * @areq: asynchronous request to start
- * @ret_stat: out parameter for status
- *
- * Start a new MMC custom command request for a host.
- * If there is on ongoing async request wait for completion
- * of that request and start the new one and return.
- * Does not wait for the new request to complete.
- *
- * Returns the completed request, NULL in case of none completed.
- * Wait for the an ongoing request (previoulsy started) to complete and
- * return the completed request. If there is no ongoing request, NULL
- * is returned without waiting. NULL is not an error condition.
- */
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat)
-{
- enum mmc_blk_status status;
- int start_err = 0;
- struct mmc_async_req *previous = host->areq;
-
- /* Prepare a new request */
- if (areq)
- mmc_pre_req(host, areq->mrq);
-
- /* Finalize previous request */
- status = mmc_finalize_areq(host);
- if (ret_stat)
- *ret_stat = status;
-
- /* The previous request is still going on... */
- if (status == MMC_BLK_NEW_REQUEST)
- return NULL;
-
- /* Fine so far, start the new request! */
- if (status == MMC_BLK_SUCCESS && areq)
- start_err = __mmc_start_data_req(host, areq->mrq);
-
- /* Postprocess the old request at this point */
- if (host->areq)
- mmc_post_req(host, host->areq->mrq, 0);
-
- /* Cancel a prepared request if it was not started. */
- if ((status != MMC_BLK_SUCCESS || start_err) && areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- if (status != MMC_BLK_SUCCESS)
- host->areq = NULL;
- else
- host->areq = areq;
-
- return previous;
-}
-EXPORT_SYMBOL(mmc_start_areq);
-
-/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
@@ -2959,6 +2761,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block,
if (!err)
break;
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "pre_suspend failed for non-removable host: "
+ "%d\n", err);
+ /* Avoid removing non-removable hosts */
+ break;
+ }
+
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
@@ -2994,22 +2804,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
}
#endif
-/**
- * mmc_init_context_info() - init synchronization context
- * @host: mmc host
- *
- * Init struct context_info needed to implement asynchronous
- * request mechanism, used by mmc core, host driver and mmc requests
- * supplier.
- */
-void mmc_init_context_info(struct mmc_host *host)
-{
- host->context_info.is_new_req = false;
- host->context_info.is_done_rcv = false;
- host->context_info.is_waiting_last_req = false;
- init_waitqueue_head(&host->context_info.wait);
-}
-
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 71e6c6d7ceb7..d6303d69071b 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -62,12 +62,10 @@ void mmc_set_initial_state(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
- if (ms < 1000 / HZ) {
- cond_resched();
- mdelay(ms);
- } else {
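+	/* Short delays use usleep_range() with 25% slack; longer delays use msleep() */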
+ if (ms <= 20)
+ usleep_range(ms * 1000, ms * 1250);
+ else
msleep(ms);
- }
}
void mmc_rescan(struct work_struct *work);
@@ -91,8 +89,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
-void mmc_init_context_info(struct mmc_host *host);
-
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
@@ -110,12 +106,6 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
-struct mmc_async_req;
-
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat);
-
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg);
int mmc_can_erase(struct mmc_card *card);
@@ -152,4 +142,35 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_cqe_recovery(struct mmc_host *host);
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let the host
+ * prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static inline void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error; if non-zero, clean up any resources set up in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
+
#endif
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb689a1065ed..06ec19b5bf9f 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -41,6 +41,11 @@ static inline int mmc_host_cmd23(struct mmc_host *host)
return host->caps & MMC_CAP_CMD23;
}
+static inline bool mmc_host_done_complete(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_DONE_COMPLETE;
+}
+
static inline int mmc_boot_partition_access(struct mmc_host *host)
{
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
@@ -74,6 +79,5 @@ static inline bool mmc_card_hs400es(struct mmc_card *card)
return card->host->ios.enhanced_strobe;
}
-
#endif
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 478869805b96..ef18daeaa4cc 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -101,7 +101,7 @@ struct mmc_test_transfer_result {
struct list_head link;
unsigned int count;
unsigned int sectors;
- struct timespec ts;
+ struct timespec64 ts;
unsigned int rate;
unsigned int iops;
};
@@ -171,11 +171,6 @@ struct mmc_test_multiple_rw {
enum mmc_test_prep_media prepare;
};
-struct mmc_test_async_req {
- struct mmc_async_req areq;
- struct mmc_test_card *test;
-};
-
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -515,14 +510,11 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
/*
* Calculate transfer rate in bytes per second.
*/
-static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
uint64_t ns;
- ns = ts->tv_sec;
- ns *= 1000000000;
- ns += ts->tv_nsec;
-
+ ns = timespec64_to_ns(ts);
bytes *= 1000000000;
while (ns > UINT_MAX) {
@@ -542,7 +534,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
* Save transfer results for future usage
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
- unsigned int count, unsigned int sectors, struct timespec ts,
+ unsigned int count, unsigned int sectors, struct timespec64 ts,
unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -567,21 +559,21 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
- struct timespec *ts1, struct timespec *ts2)
+ struct timespec64 *ts1, struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
- pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
- (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
+ (u32)ts.tv_nsec, rate / 1000, rate / 1024,
iops / 100, iops % 100);
mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
@@ -591,24 +583,24 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
* Print the average transfer rate.
*/
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
- unsigned int count, struct timespec *ts1,
- struct timespec *ts2)
+ unsigned int count, struct timespec64 *ts1,
+ struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%llu.%09u seconds (%u kB/s, %u KiB/s, "
"%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
- (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+ (u64)ts.tv_sec, (u32)ts.tv_nsec,
rate / 1000, rate / 1024, iops / 100, iops % 100,
test->area.sg_len);
@@ -741,30 +733,6 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
-static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_test_async_req *test_async =
- container_of(areq, struct mmc_test_async_req, areq);
- int ret;
-
- mmc_test_wait_busy(test_async->test);
-
- /*
- * FIXME: this would earlier just casts a regular error code,
- * either of the kernel type -ERRORCODE or the local test framework
- * RESULT_* errorcode, into an enum mmc_blk_status and return as
- * result check. Instead, convert it to some reasonable type by just
- * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
- * If possible, a reasonable error code should be returned.
- */
- ret = mmc_test_check_result(test_async->test, areq->mrq);
- if (ret)
- return MMC_BLK_CMD_ERR;
-
- return MMC_BLK_SUCCESS;
-}
-
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -831,6 +799,45 @@ static struct mmc_test_req *mmc_test_req_alloc(void)
return rq;
}
+static void mmc_test_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int mmc_test_start_areq(struct mmc_test_card *test,
+ struct mmc_request *mrq,
+ struct mmc_request *prev_mrq)
+{
+ struct mmc_host *host = test->card->host;
+ int err = 0;
+
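+	/*
+	 * Replacement for the removed mmc_start_areq(): start mrq (if any) once
+	 * prev_mrq (if any) has completed and its result has been checked.
+	 */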
+ if (mrq) {
+ init_completion(&mrq->completion);
+ mrq->done = mmc_test_wait_done;
+ mmc_pre_req(host, mrq);
+ }
+
+ if (prev_mrq) {
+ wait_for_completion(&prev_mrq->completion);
+ err = mmc_test_wait_busy(test);
+ if (!err)
+ err = mmc_test_check_result(test, prev_mrq);
+ }
+
+ if (!err && mrq) {
+ err = mmc_start_request(host, mrq);
+ if (err)
+ mmc_retune_release(host);
+ }
+
+ if (prev_mrq)
+ mmc_post_req(host, prev_mrq, 0);
+
+ if (err && mrq)
+ mmc_post_req(host, mrq, err);
+
+ return err;
+}
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len,
@@ -838,17 +845,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
unsigned blksz, int write, int count)
{
struct mmc_test_req *rq1, *rq2;
- struct mmc_test_async_req test_areq[2];
- struct mmc_async_req *done_areq;
- struct mmc_async_req *cur_areq = &test_areq[0].areq;
- struct mmc_async_req *other_areq = &test_areq[1].areq;
- enum mmc_blk_status status;
+ struct mmc_request *mrq, *prev_mrq;
int i;
int ret = RESULT_OK;
- test_areq[0].test = test;
- test_areq[1].test = test;
-
rq1 = mmc_test_req_alloc();
rq2 = mmc_test_req_alloc();
if (!rq1 || !rq2) {
@@ -856,33 +856,25 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
goto err;
}
- cur_areq->mrq = &rq1->mrq;
- cur_areq->err_check = mmc_test_check_result_async;
- other_areq->mrq = &rq2->mrq;
- other_areq->err_check = mmc_test_check_result_async;
+ mrq = &rq1->mrq;
+ prev_mrq = NULL;
for (i = 0; i < count; i++) {
- mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
- blocks, blksz, write);
- done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
-
- if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
- ret = RESULT_FAIL;
+ mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
+ mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
+ blksz, write);
+ ret = mmc_test_start_areq(test, mrq, prev_mrq);
+ if (ret)
goto err;
- }
- if (done_areq)
- mmc_test_req_reset(container_of(done_areq->mrq,
- struct mmc_test_req, mrq));
+ if (!prev_mrq)
+ prev_mrq = &rq2->mrq;
- swap(cur_areq, other_areq);
+ swap(mrq, prev_mrq);
dev_addr += blocks;
}
- done_areq = mmc_start_areq(test->card->host, NULL, &status);
- if (status != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
-
+ ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
kfree(rq1);
kfree(rq2);
@@ -1449,7 +1441,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
int max_scatter, int timed, int count,
bool nonblock, int min_sg_len)
{
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret = 0;
int i;
struct mmc_test_area *t = &test->area;
@@ -1475,7 +1467,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
if (nonblock)
ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
dev_addr, t->blocks, 512, write, count);
@@ -1489,7 +1481,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
if (timed)
mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
@@ -1747,7 +1739,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1758,19 +1750,19 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
for (sz = 512; sz < t->max_sz; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
}
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
return 0;
}
@@ -1779,19 +1771,19 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1818,7 +1810,7 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
ret = mmc_test_area_erase(test);
@@ -1826,14 +1818,14 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1864,7 +1856,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1882,7 +1874,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_erase(test->card, dev_addr, sz >> 9,
MMC_TRIM_ARG);
@@ -1890,7 +1882,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
return 0;
@@ -1912,7 +1904,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
- struct timespec ts1, ts2, ts;
+ struct timespec64 ts1, ts2, ts;
int ret;
ssz = sz >> 9;
@@ -1921,10 +1913,10 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
range1 = rnd_addr / test->card->pref_erase;
range2 = range1 / ssz;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (cnt = 0; cnt < UINT_MAX; cnt++) {
- getnstimeofday(&ts2);
- ts = timespec_sub(ts2, ts1);
+ ktime_get_ts64(&ts2);
+ ts = timespec64_sub(ts2, ts1);
if (ts.tv_sec >= 10)
break;
ea = mmc_test_rnd_num(range1);
@@ -1998,7 +1990,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt, sz, ssz;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
sz = t->max_tfr;
@@ -2025,7 +2017,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
cnt = tot_sz / sz;
dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, write,
max_scatter, 0);
@@ -2033,7 +2025,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
return ret;
dev_addr += ssz;
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
@@ -2328,10 +2320,17 @@ static int mmc_test_reset(struct mmc_test_card *test)
int err;
err = mmc_hw_reset(host);
- if (!err)
+ if (!err) {
+ /*
+ * Reset will re-enable the card's command queue, but tests
+ * expect it to be disabled.
+ */
+ if (card->ext_csd.cmdq_en)
+ mmc_cmdq_disable(card);
return RESULT_OK;
- else if (err == -EOPNOTSUPP)
+ } else if (err == -EOPNOTSUPP) {
return RESULT_UNSUP_HOST;
+ }
return RESULT_FAIL;
}
@@ -2356,11 +2355,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
- enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
@@ -2373,9 +2370,6 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- test_areq.areq.mrq = mrq;
- test_areq.areq.err_check = mmc_test_check_result_async;
-
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2388,11 +2382,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_areq(host, &test_areq.areq, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS) {
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, mrq, NULL);
+ if (ret)
goto out_free;
- }
} else {
mmc_wait_for_req(host, mrq);
}
@@ -2426,9 +2418,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Wait for data request to complete */
if (use_areq) {
- mmc_start_areq(host, NULL, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, NULL, mrq);
} else {
mmc_wait_for_req_done(test->card->host, mrq);
}
@@ -3066,10 +3056,9 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
+ seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
tr->count, tr->sectors,
- (unsigned long)tr->ts.tv_sec,
- (unsigned long)tr->ts.tv_nsec,
+ (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
tr->rate, tr->iops / 100, tr->iops % 100);
}
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4f33d277b125..421fab7250ac 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -22,100 +22,147 @@
#include "block.h"
#include "core.h"
#include "card.h"
+#include "host.h"
-/*
- * Prepare a MMC request. This just filters out odd stuff.
- */
-static int mmc_prep_request(struct request_queue *q, struct request *req)
+static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
- struct mmc_queue *mq = q->queuedata;
+ /* Allow only 1 DCMD at a time */
+ return mq->in_flight[MMC_ISSUE_DCMD];
+}
- if (mq && mmc_card_removed(mq->card))
- return BLKPREP_KILL;
+void mmc_cqe_check_busy(struct mmc_queue *mq)
+{
+ if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
+ mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
- req->rq_flags |= RQF_DONTPREP;
+ mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
+}
- return BLKPREP_OK;
+static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_CQE_DCMD;
}
-static int mmc_queue_thread(void *d)
+static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
+ struct request *req)
{
- struct mmc_queue *mq = d;
- struct request_queue *q = mq->queue;
- struct mmc_context_info *cntx = &mq->card->host->context_info;
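+	/*
+	 * Non-read/write requests are issued synchronously; a flush may be
+	 * issued as a CQE direct command (DCMD) when the host supports it.
+	 */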
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ return MMC_ISSUE_SYNC;
+ case REQ_OP_FLUSH:
+ return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
+ default:
+ return MMC_ISSUE_ASYNC;
+ }
+}
- current->flags |= PF_MEMALLOC;
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
- down(&mq->thread_sem);
- do {
- struct request *req;
+ if (mq->use_cqe)
+ return mmc_cqe_issue_type(host, req);
- spin_lock_irq(q->queue_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- req = blk_fetch_request(q);
- mq->asleep = false;
- cntx->is_waiting_last_req = false;
- cntx->is_new_req = false;
- if (!req) {
- /*
- * Dispatch queue is empty so set flags for
- * mmc_request_fn() to wake us up.
- */
- if (mq->qcnt)
- cntx->is_waiting_last_req = true;
- else
- mq->asleep = true;
- }
- spin_unlock_irq(q->queue_lock);
+ if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
+ return MMC_ISSUE_ASYNC;
- if (req || mq->qcnt) {
- set_current_state(TASK_RUNNING);
- mmc_blk_issue_rq(mq, req);
- cond_resched();
- } else {
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- up(&mq->thread_sem);
- schedule();
- down(&mq->thread_sem);
- }
- } while (1);
- up(&mq->thread_sem);
+ return MMC_ISSUE_SYNC;
+}
- return 0;
+static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
+{
+ if (!mq->recovery_needed) {
+ mq->recovery_needed = true;
+ schedule_work(&mq->recovery_work);
+ }
}
-/*
- * Generic MMC request handler. This is called for any queue on a
- * particular host. When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it. This may
- * not be the queue we were asked to process.
- */
-static void mmc_request_fn(struct request_queue *q)
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
- struct request *req;
- struct mmc_context_info *cntx;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __mmc_cqe_recovery_notifier(mq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
- if (!mq) {
- while ((req = blk_fetch_request(q)) != NULL) {
- req->rq_flags |= RQF_QUIET;
- __blk_end_request_all(req, BLK_STS_IOERR);
+static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool recovery_needed = false;
+
+ switch (issue_type) {
+ case MMC_ISSUE_ASYNC:
+ case MMC_ISSUE_DCMD:
+ if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
+ if (recovery_needed)
+ __mmc_cqe_recovery_notifier(mq);
+ return BLK_EH_RESET_TIMER;
}
- return;
+ /* No timeout */
+ return BLK_EH_HANDLED;
+ default:
+ /* Timeout is handled by mmc core */
+ return BLK_EH_RESET_TIMER;
}
+}
+
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
+ bool reserved)
+{
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ unsigned long flags;
+ int ret;
- cntx = &mq->card->host->context_info;
+ spin_lock_irqsave(q->queue_lock, flags);
- if (cntx->is_waiting_last_req) {
- cntx->is_new_req = true;
- wake_up_interruptible(&cntx->wait);
- }
+ if (mq->recovery_needed || !mq->use_cqe)
+ ret = BLK_EH_RESET_TIMER;
+ else
+ ret = mmc_cqe_timed_out(req);
- if (mq->asleep)
- wake_up_process(mq->thread);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return ret;
+}
+
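+/*
+ * Recovery work: claim the card, run CQE or blk-mq recovery as appropriate,
+ * then clear the recovery flag and restart the hardware queues.
+ */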
+static void mmc_mq_recovery_handler(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ recovery_work);
+ struct request_queue *q = mq->queue;
+
+ mmc_get_card(mq->card, &mq->ctx);
+
+ mq->in_recovery = true;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_recovery(mq);
+ else
+ mmc_blk_mq_recovery(mq);
+
+ mq->in_recovery = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->recovery_needed = false;
+ spin_unlock_irq(q->queue_lock);
+
+ mmc_put_card(mq->card, &mq->ctx);
+
+ blk_mq_run_hw_queues(q, true);
}
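
The pieces above and the dispatch path below form one handshake: mmc_cqe_recovery_notifier() sets recovery_needed under the queue lock and schedules recovery_work at most once, mmc_mq_queue_rq() returns BLK_STS_RESOURCE while the flag is set so blk-mq backs off and retries, and mmc_mq_recovery_handler() performs the recovery, clears the flag and reruns the hardware queues. A minimal sketch of the same synchronization skeleton; struct example_dev and its fields are stand-ins for illustration, not part of this patch:

	#include <linux/blk-mq.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct example_dev {
		spinlock_t lock;
		bool recovery_needed;		/* guarded by lock */
		struct work_struct recovery_work;
		struct request_queue *queue;
	};

	/* Error context: request recovery exactly once */
	static void example_notify_recovery(struct example_dev *dev)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (!dev->recovery_needed) {
			dev->recovery_needed = true;
			schedule_work(&dev->recovery_work);
		}
		spin_unlock_irqrestore(&dev->lock, flags);
	}

	/* Dispatch side: back off while recovery is pending */
	static blk_status_t example_queue_rq_prologue(struct example_dev *dev)
	{
		blk_status_t ret = BLK_STS_OK;

		spin_lock_irq(&dev->lock);
		if (dev->recovery_needed)
			ret = BLK_STS_RESOURCE;	/* blk-mq retries later */
		spin_unlock_irq(&dev->lock);

		return ret;
	}

	/* Worker: recover, clear the flag, then kick the queue again */
	static void example_recovery_handler(struct work_struct *work)
	{
		struct example_dev *dev = container_of(work, struct example_dev,
						       recovery_work);

		/* device-specific recovery would run here */

		spin_lock_irq(&dev->lock);
		dev->recovery_needed = false;
		spin_unlock_irq(&dev->lock);

		blk_mq_run_hw_queues(dev->queue, true);
	}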
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
@@ -154,11 +201,10 @@ static void mmc_queue_setup_discard(struct request_queue *q,
* @req: the request
* @gfp: memory allocation policy
*/
-static int mmc_init_request(struct request_queue *q, struct request *req,
- gfp_t gfp)
+static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
+ gfp_t gfp)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
@@ -177,6 +223,131 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
+}
+
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
+{
+ struct mmc_queue *mq = set->driver_data;
+
+ mmc_exit_request(mq->queue, req);
+}
+
+/*
+ * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
+ * will not be dispatched in parallel.
+ */
+static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ enum mmc_issue_type issue_type;
+ enum mmc_issued issued;
+ bool get_card, cqe_retune_ok;
+ int ret;
+
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+ }
+
+ issue_type = mmc_issue_type(mq, req);
+
+ spin_lock_irq(q->queue_lock);
+
+ if (mq->recovery_needed) {
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+
+ switch (issue_type) {
+ case MMC_ISSUE_DCMD:
+ if (mmc_cqe_dcmd_busy(mq)) {
+ mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+ break;
+ case MMC_ISSUE_ASYNC:
+ break;
+ default:
+ /*
+ * Timeouts are handled by mmc core, and we don't have a host
+ * API to abort requests, so we can't handle the timeout anyway.
+ * However, when the timeout happens, blk_mq_complete_request()
+ * no longer works (to stop the request disappearing under us).
+ * To avoid racing with that, set a large timeout.
+ */
+ req->timeout = 600 * HZ;
+ break;
+ }
+
+ mq->in_flight[issue_type] += 1;
+ get_card = (mmc_tot_in_flight(mq) == 1);
+ cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ req_to_mmc_queue_req(req)->retries = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+
+ if (get_card)
+ mmc_get_card(card, &mq->ctx);
+
+ if (mq->use_cqe) {
+ host->retune_now = host->need_retune && cqe_retune_ok &&
+ !host->hold_retune;
+ }
+
+ blk_mq_start_request(req);
+
+ issued = mmc_blk_mq_issue_rq(mq, req);
+
+ switch (issued) {
+ case MMC_REQ_BUSY:
+ ret = BLK_STS_RESOURCE;
+ break;
+ case MMC_REQ_FAILED_TO_START:
+ ret = BLK_STS_IOERR;
+ break;
+ default:
+ ret = BLK_STS_OK;
+ break;
+ }
+
+ if (issued != MMC_REQ_STARTED) {
+ bool put_card = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->in_flight[issue_type] -= 1;
+ if (mmc_tot_in_flight(mq) == 0)
+ put_card = true;
+ spin_unlock_irq(q->queue_lock);
+ if (put_card)
+ mmc_put_card(card, &mq->ctx);
+ }
+
+ return ret;
+}
+
+static const struct blk_mq_ops mmc_mq_ops = {
+ .queue_rq = mmc_mq_queue_rq,
+ .init_request = mmc_mq_init_request,
+ .exit_request = mmc_mq_exit_request,
+ .complete = mmc_blk_mq_complete,
+ .timeout = mmc_mq_timed_out,
+};
+
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -196,124 +367,139 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- /* Initialize thread_sem even if it is not used */
- sema_init(&mq->thread_sem, 1);
+ INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
+ INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
+
+ mutex_init(&mq->complete_lock);
+
+ init_waitqueue_head(&mq->wait);
}
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
+static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
+ const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
- struct mmc_host *host = card->host;
- int ret = -ENOMEM;
-
- mq->card = card;
- mq->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mq->queue)
- return -ENOMEM;
- mq->queue->queue_lock = lock;
- mq->queue->request_fn = mmc_request_fn;
- mq->queue->init_rq_fn = mmc_init_request;
- mq->queue->exit_rq_fn = mmc_exit_request;
- mq->queue->cmd_size = sizeof(struct mmc_queue_req);
- mq->queue->queuedata = mq;
- mq->qcnt = 0;
- ret = blk_init_allocated_queue(mq->queue);
- if (ret) {
- blk_cleanup_queue(mq->queue);
+ int ret;
+
+ memset(&mq->tag_set, 0, sizeof(mq->tag_set));
+ mq->tag_set.ops = mq_ops;
+ mq->tag_set.queue_depth = q_depth;
+ mq->tag_set.numa_node = NUMA_NO_NODE;
+ mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
+ BLK_MQ_F_BLOCKING;
+ mq->tag_set.nr_hw_queues = 1;
+ mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
+ mq->tag_set.driver_data = mq;
+
+ ret = blk_mq_alloc_tag_set(&mq->tag_set);
+ if (ret)
return ret;
- }
-
- blk_queue_prep_rq(mq->queue, mmc_prep_request);
-
- mmc_setup_queue(mq, card);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
- host->index, subname ? subname : "");
-
- if (IS_ERR(mq->thread)) {
- ret = PTR_ERR(mq->thread);
- goto cleanup_queue;
+ mq->queue = blk_mq_init_queue(&mq->tag_set);
+ if (IS_ERR(mq->queue)) {
+ ret = PTR_ERR(mq->queue);
+ goto free_tag_set;
}
+ mq->queue->queue_lock = lock;
+ mq->queue->queuedata = mq;
+
return 0;
-cleanup_queue:
- blk_cleanup_queue(mq->queue);
+free_tag_set:
+ blk_mq_free_tag_set(&mq->tag_set);
+
return ret;
}
-void mmc_cleanup_queue(struct mmc_queue *mq)
-{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
- /* Make sure the queue isn't suspended, as that will deadlock */
- mmc_queue_resume(mq);
+static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock)
+{
+ struct mmc_host *host = card->host;
+ int q_depth;
+ int ret;
+
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
+ */
+ if (mq->use_cqe)
+ q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ q_depth = MMC_QUEUE_DEPTH;
+
+ ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
+ if (ret)
+ return ret;
- /* Then terminate our worker thread */
- kthread_stop(mq->thread);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
- /* Empty the queue */
- spin_lock_irqsave(q->queue_lock, flags);
- q->queuedata = NULL;
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mmc_setup_queue(mq, card);
- mq->card = NULL;
+ return 0;
}
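
Making the depth arithmetic concrete: because the blk-mq request tag is used directly to index CQHCI's task slots, the queue depth must never exceed the hardware depth. A worked example with assumed values:

	/*
	 * Illustrative values only, not taken from this patch:
	 *
	 *   card->ext_csd.cmdq_depth = 16   eMMC advertises 16 queued tasks
	 *   host->cqe_qdepth         = 31   CQHCI slots minus the DCMD slot
	 *
	 *   q_depth = min_t(int, 16, 31) = 16
	 *
	 * so every possible tag 0..15 maps onto a valid hardware task slot.
	 * Without a CQE, the depth is simply MMC_QUEUE_DEPTH (64), chosen
	 * only to give a reasonable q->nr_requests.
	 */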
-EXPORT_SYMBOL(mmc_cleanup_queue);
/**
- * mmc_queue_suspend - suspend a MMC request queue
- * @mq: MMC queue to suspend
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
*
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests. This ensures that we
- * won't suspend while a request is being processed.
+ * Initialise a MMC card request queue.
*/
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+ struct mmc_host *host = card->host;
- if (!mq->suspended) {
- mq->suspended |= true;
+ mq->card = card;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mq->use_cqe = host->cqe_enabled;
- down(&mq->thread_sem);
- }
+ return mmc_mq_init(mq, card, lock);
+}
+
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ blk_mq_quiesce_queue(mq->queue);
+
+ /*
+ * The host remains claimed while there are outstanding requests, so
+ * simply claiming and releasing here ensures there are none.
+ */
+ mmc_claim_host(mq->card->host);
+ mmc_release_host(mq->card->host);
}
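
Two facts make this suspend path work: blk_mq_quiesce_queue() guarantees no further ->queue_rq() invocations, and the host claim is held for as long as any request is in flight, so claiming and releasing acts as a drain barrier. An ordering sketch, mirroring the code above:

	/*
	 * blk_mq_quiesce_queue(q);   no new dispatches after this point
	 * mmc_claim_host(host);      blocks until the last in-flight request
	 *                            does mmc_put_card() and releases the host
	 * mmc_release_host(host);    queue is now both idle and quiesced
	 */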
-/**
- * mmc_queue_resume - resume a previously suspended MMC request queue
- * @mq: MMC queue to resume
- */
void mmc_queue_resume(struct mmc_queue *mq)
{
+ blk_mq_unquiesce_queue(mq->queue);
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
struct request_queue *q = mq->queue;
- unsigned long flags;
- if (mq->suspended) {
- mq->suspended = false;
+ /*
+ * The legacy code handled the possibility of being suspended,
+ * so do that here too.
+ */
+ if (blk_queue_quiesced(q))
+ blk_mq_unquiesce_queue(q);
- up(&mq->thread_sem);
+ blk_cleanup_queue(q);
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ /*
+ * A request can be completed before the next request, potentially
+ * leaving a complete_work with nothing to do. Such a work item might
+ * still be queued at this point. Flush it.
+ */
+ flush_work(&mq->complete_work);
+
+ mq->card = NULL;
}
/*
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 547b457c4251..17e59d50b496 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -8,6 +8,20 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+enum mmc_issued {
+ MMC_REQ_STARTED,
+ MMC_REQ_BUSY,
+ MMC_REQ_FAILED_TO_START,
+ MMC_REQ_FINISHED,
+};
+
+enum mmc_issue_type {
+ MMC_ISSUE_SYNC,
+ MMC_ISSUE_DCMD,
+ MMC_ISSUE_ASYNC,
+ MMC_ISSUE_MAX,
+};
+
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
@@ -20,7 +34,6 @@ static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
return blk_mq_rq_from_pdu(mqr);
}
-struct task_struct;
struct mmc_blk_data;
struct mmc_blk_ioc_data;
@@ -30,7 +43,6 @@ struct mmc_blk_request {
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
- int retune_retry_done;
};
/**
@@ -52,28 +64,34 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
void *drv_op_data;
unsigned int ioc_count;
+ int retries;
};
struct mmc_queue {
struct mmc_card *card;
- struct task_struct *thread;
- struct semaphore thread_sem;
- bool suspended;
- bool asleep;
+ struct mmc_ctx ctx;
+ struct blk_mq_tag_set tag_set;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
- /*
- * FIXME: this counter is not a very reliable way of keeping
- * track of how many requests that are ongoing. Switch to just
- * letting the block core keep track of requests and per-request
- * associated mmc_queue_req data.
- */
- int qcnt;
+ int in_flight[MMC_ISSUE_MAX];
+ unsigned int cqe_busy;
+#define MMC_CQE_DCMD_BUSY BIT(0)
+#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool use_cqe;
+ bool recovery_needed;
+ bool in_recovery;
+ bool rw_wait;
+ bool waiting;
+ struct work_struct recovery_work;
+ wait_queue_head_t wait;
+ struct request *recovery_req;
+ struct request *complete_req;
+ struct mutex complete_lock;
+ struct work_struct complete_work;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -84,4 +102,22 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
+void mmc_cqe_check_busy(struct mmc_queue *mq);
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq);
+
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
+
+static inline int mmc_tot_in_flight(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_SYNC] +
+ mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
#endif
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 863f1dbbfc1b..3698b0576009 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -121,20 +121,18 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
void mmc_gpiod_request_cd_irq(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
- int ret, irq;
+ int irq = -EINVAL;
+ int ret;
if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
return;
- irq = gpiod_to_irq(ctx->cd_gpio);
-
/*
- * Even if gpiod_to_irq() returns a valid IRQ number, the platform might
- * still prefer to poll, e.g., because that IRQ number is already used
- * by another unit and cannot be shared.
+ * Do not use IRQ if the platform prefers to poll, e.g., because that
+ * IRQ number is already used by another unit and cannot be shared.
*/
- if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
- irq = -EINVAL;
+ if (!(host->caps & MMC_CAP_NEEDS_POLL))
+ irq = gpiod_to_irq(ctx->cd_gpio);
if (irq >= 0) {
if (!ctx->cd_gpio_isr)
@@ -307,3 +305,11 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
+
+bool mmc_can_gpio_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return ctx->ro_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_ro);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 567028c9219a..67bd3344dd03 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,6 +81,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
+ select MMC_CQHCI
help
This selects the PCI Secure Digital Host Controller Interface.
Most controllers found today are PCI devices.
@@ -132,6 +133,7 @@ config MMC_SDHCI_OF_ARASAN
depends on MMC_SDHCI_PLTFM
depends on OF
depends on COMMON_CLK
+ select MMC_CQHCI
help
This selects the Arasan Secure Digital Host Controller Interface
(SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
@@ -320,7 +322,7 @@ config MMC_SDHCI_BCM_KONA
config MMC_SDHCI_F_SDH30
tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
depends on MMC_SDHCI_PLTFM
- depends on OF
+ depends on OF || ACPI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
needed by some Fujitsu SoCs for MMC / SD / SDIO support.
@@ -595,11 +597,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
- depends on SUPERH || ARM || ARM64
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select MMC_TMIO_CORE
- select MMC_SDHI_SYS_DMAC if (SUPERH || ARM)
- select MMC_SDHI_INTERNAL_DMAC if ARM64
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs
@@ -607,6 +606,7 @@ config MMC_SDHI
config MMC_SDHI_SYS_DMAC
tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC"
depends on MMC_SDHI
+ default MMC_SDHI if (SUPERH || ARM)
help
This provides DMA support for SDHI SD/SDIO controllers
using SYS-DMAC via DMA Engine. This supports the controllers
@@ -616,6 +616,7 @@ config MMC_SDHI_INTERNAL_DMAC
tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
depends on ARM64 || COMPILE_TEST
depends on MMC_SDHI
+ default MMC_SDHI if ARM64
help
This provides DMA support for SDHI SD/SDIO controllers
using on-chip bus mastering. This supports the controllers
@@ -838,14 +839,14 @@ config MMC_USDHI6ROL0
config MMC_REALTEK_PCI
tristate "Realtek PCI-E SD/MMC Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek PCI-E card reader
config MMC_REALTEK_USB
tristate "Realtek USB SD/MMC Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek RTS5129/39 series card reader
@@ -857,6 +858,19 @@ config MMC_SUNXI
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
+config MMC_CQHCI
+ tristate "Command Queue Host Controller Interface support"
+ depends on HAS_DMA
+ help
+ This selects the Command Queue Host Controller Interface (CQHCI)
+ support present in host controllers of Qualcomm Technologies, Inc.,
+ amongst others.
+ This interface supports eMMC devices that implement command queueing.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index a43cf0d5a5d3..84cd1388abc3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
+sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
@@ -39,12 +39,8 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o
-endif
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o
-endif
+obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
+obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
@@ -92,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
+obj-$(CONFIG_MMC_CQHCI) += cqhci.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 63fe5091ca59..63d27589cd89 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -42,13 +42,11 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>
-#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/types.h>
-#include <asm/io.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
new file mode 100644
index 000000000000..159270e947cf
--- /dev/null
+++ b/drivers/mmc/host/cqhci.c
@@ -0,0 +1,1150 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include "cqhci.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+struct cqhci_slot {
+ struct mmc_request *mrq;
+ unsigned int flags;
+#define CQHCI_EXTERNAL_TIMEOUT BIT(0)
+#define CQHCI_COMPLETED BIT(1)
+#define CQHCI_HOST_CRC BIT(2)
+#define CQHCI_HOST_TIMEOUT BIT(3)
+#define CQHCI_HOST_OTHER BIT(4)
+};
+
+static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *desc = get_desc(cq_host, tag);
+
+ return desc + cq_host->task_desc_len;
+}
+
+static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_dma_base +
+ (cq_host->mmc->max_segs * tag *
+ cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_base +
+ (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *link_temp;
+ dma_addr_t trans_temp;
+
+ link_temp = get_link_desc(cq_host, tag);
+ trans_temp = get_trans_desc_dma(cq_host, tag);
+
+ memset(link_temp, 0, cq_host->link_desc_len);
+ if (cq_host->link_desc_len > 8)
+ *(link_temp + 8) = 0;
+
+ if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
+ *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
+ return;
+ }
+
+ *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
+
+ if (cq_host->dma64) {
+ __le64 *data_addr = (__le64 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le64(trans_temp);
+ } else {
+ __le32 *data_addr = (__le32 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le32(trans_temp);
+ }
+}
+
+static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
+{
+ cqhci_writel(cq_host, set, CQHCI_ISTE);
+ cqhci_writel(cq_host, set, CQHCI_ISGE);
+}
+
+#define DRV_NAME "cqhci"
+
+#define CQHCI_DUMP(f, x...) \
+ pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
+
+static void cqhci_dumpregs(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+
+ CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
+
+ CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CAP),
+ cqhci_readl(cq_host, CQHCI_VER));
+ CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CFG),
+ cqhci_readl(cq_host, CQHCI_CTL));
+ CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_IS),
+ cqhci_readl(cq_host, CQHCI_ISTE));
+ CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_ISGE),
+ cqhci_readl(cq_host, CQHCI_IC));
+ CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDLBA),
+ cqhci_readl(cq_host, CQHCI_TDLBAU));
+ CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDBR),
+ cqhci_readl(cq_host, CQHCI_TCN));
+ CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_DQS),
+ cqhci_readl(cq_host, CQHCI_DPT));
+ CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TCLR),
+ cqhci_readl(cq_host, CQHCI_SSC1));
+ CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_SSC2),
+ cqhci_readl(cq_host, CQHCI_CRDCT));
+ CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_RMEM),
+ cqhci_readl(cq_host, CQHCI_TERRI));
+ CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CRI),
+ cqhci_readl(cq_host, CQHCI_CRA));
+
+ if (cq_host->ops->dumpregs)
+ cq_host->ops->dumpregs(mmc);
+ else
+ CQHCI_DUMP(": ===========================================\n");
+}
+
+/*
+ * The allocated descriptor table for task, link & transfer descriptors
+ * looks like:
+ * |----------|
+ * |task desc | |->|----------|
+ * |----------| | |trans desc|
+ * |link desc-|->| |----------|
+ * |----------| .
+ * . .
+ * no. of slots max-segs
+ * . |----------|
+ * |----------|
+ * The idea here is to create the [task+link] descriptor table and point
+ * each slot's link descriptor at that slot's transfer descriptor list.
+ */
+static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
+{
+ int i;
+
+ /* task descriptor can be 64/128 bit irrespective of arch */
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
+ cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
+ CQHCI_TASK_DESC_SZ, CQHCI_CFG);
+ cq_host->task_desc_len = 16;
+ } else {
+ cq_host->task_desc_len = 8;
+ }
+
+ /*
+ * The transfer descriptor may be 96 bits long instead of 128 bits, in
+ * which case ADMA expects the next valid descriptor at the 96th bit
+ * rather than the 128th bit.
+ */
+ if (cq_host->dma64) {
+ if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
+ cq_host->trans_desc_len = 12;
+ else
+ cq_host->trans_desc_len = 16;
+ cq_host->link_desc_len = 16;
+ } else {
+ cq_host->trans_desc_len = 8;
+ cq_host->link_desc_len = 8;
+ }
+
+ /* total size of a slot: 1 task & 1 transfer (link) */
+ cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
+
+ cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+ (cq_host->num_slots - 1);
+
+ pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
+ cq_host->slot_sz);
+
+ /*
+ * Allocate one DMA-mapped chunk for the task/link descriptor table
+ * and another for the transfer descriptors, then point each slot's
+ * link descriptor at its slice of the transfer descriptor table.
+ */
+ cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+ if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ return -ENOMEM;
+
+ pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
+ (unsigned long long)cq_host->desc_dma_base,
+ (unsigned long long)cq_host->trans_desc_dma_base);
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ setup_trans_desc(cq_host, i);
+
+ return 0;
+}
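
As a cross-check of the sizing logic, a worked example for a 64-bit DMA host without CQHCI_QUIRK_SHORT_TXFR_DESC_SZ, using 64-bit task descriptors; the max_segs value is an assumption for illustration:

	/*
	 * task_desc_len  = 8                       (64-bit task descriptors)
	 * trans_desc_len = 16, link_desc_len = 16  (dma64, no quirk)
	 * max_segs       = 128                     (assumed)
	 *
	 * slot_sz   = 8 + 16                = 24 bytes per slot
	 * desc_size = 24 * 32 slots         = 768 bytes
	 * data_size = 16 * 128 * (32 - 1)   = 63488 bytes
	 *
	 * The DCMD slot carries no data transfer, hence num_slots - 1.
	 */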
+
+static void __cqhci_enable(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+
+ /* Configuration must not be changed while enabled */
+ if (cqcfg & CQHCI_ENABLE) {
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ }
+
+ cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
+
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ cqcfg |= CQHCI_DCMD;
+
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
+ cqcfg |= CQHCI_TASK_DESC_SZ;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBA);
+ cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBAU);
+
+ cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ cqcfg |= CQHCI_ENABLE;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ mmc->cqe_on = true;
+
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+
+ /* Ensure all writes are done before interrupts are enabled */
+ wmb();
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ cq_host->activated = true;
+}
+
+static void __cqhci_disable(struct cqhci_host *cq_host)
+{
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cq_host->mmc->cqe_on = false;
+
+ cq_host->activated = false;
+}
+
+int cqhci_suspend(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (cq_host->enabled)
+ __cqhci_disable(cq_host);
+
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_suspend);
+
+int cqhci_resume(struct mmc_host *mmc)
+{
+ /* Re-enable is done upon first request */
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_resume);
+
+static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int err;
+
+ if (cq_host->enabled)
+ return 0;
+
+ cq_host->rca = card->rca;
+
+ err = cqhci_host_alloc_tdl(cq_host);
+ if (err)
+ return err;
+
+ __cqhci_enable(cq_host);
+
+ cq_host->enabled = true;
+
+#ifdef DEBUG
+ cqhci_dumpregs(cq_host);
+#endif
+ return 0;
+}
+
+/* CQHCI is idle and should halt immediately, so set a small timeout */
+#define CQHCI_OFF_TIMEOUT 100
+
+static void cqhci_off(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ ktime_t timeout;
+ bool timed_out;
+ u32 reg;
+
+ if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
+ return;
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, false);
+
+ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+
+ timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
+ while (1) {
+ timed_out = ktime_compare(ktime_get(), timeout) > 0;
+ reg = cqhci_readl(cq_host, CQHCI_CTL);
+ if ((reg & CQHCI_HALT) || timed_out)
+ break;
+ }
+
+ if (timed_out)
+ pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
+ else
+ pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
+
+ mmc->cqe_on = false;
+}
+
+static void cqhci_disable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->enabled)
+ return;
+
+ cqhci_off(mmc);
+
+ __cqhci_disable(cq_host);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
+ cq_host->trans_desc_base,
+ cq_host->trans_desc_dma_base);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+
+ cq_host->trans_desc_base = NULL;
+ cq_host->desc_base = NULL;
+
+ cq_host->enabled = false;
+}
+
+static void cqhci_prep_task_desc(struct mmc_request *mrq,
+ u64 *data, bool intr)
+{
+ u32 req_flags = mrq->data->flags;
+
+ *data = CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(intr) |
+ CQHCI_ACT(0x5) |
+ CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
+ CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
+ CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
+ CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
+ CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
+ CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
+ CQHCI_BLK_COUNT(mrq->data->blocks) |
+ CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
+
+ pr_debug("%s: cqhci: tag %d task descriptor 0x016%llx\n",
+ mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
+}
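
To see how the attribute fields combine, a hypothetical example: an 8-block read at blk_addr 0x1000 with interrupt on completion (as cqhci_request() passes intr = 1) and none of the tag/priority/QBAR/reliable-write flags set. All values are assumed for illustration:

	/*
	 * CQHCI_VALID(1)          -> 0x0000000000000001
	 * CQHCI_END(1)            -> 0x0000000000000002
	 * CQHCI_INT(1)            -> 0x0000000000000004
	 * CQHCI_ACT(0x5)          -> 0x0000000000000028
	 * CQHCI_DATA_DIR(1)       -> 0x0000000000001000   (read)
	 * CQHCI_BLK_COUNT(8)      -> 0x0000000000080000
	 * CQHCI_BLK_ADDR(0x1000)  -> 0x0000100000000000
	 *                            ------------------
	 * task descriptor         =  0x000010000008102f
	 */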
+
+static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int sg_count;
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return -EINVAL;
+
+ sg_count = dma_map_sg(mmc_dev(host), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!sg_count) {
+ pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
+ return -ENOMEM;
+ }
+
+ return sg_count;
+}
+
+static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
+ bool dma64)
+{
+ __le32 *attr = (__le32 __force *)desc;
+
+ *attr = (CQHCI_VALID(1) |
+ CQHCI_END(end ? 1 : 0) |
+ CQHCI_INT(0) |
+ CQHCI_ACT(0x4) |
+ CQHCI_DAT_LENGTH(len));
+
+ if (dma64) {
+ __le64 *dataddr = (__le64 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le64(addr);
+ } else {
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le32(addr);
+ }
+}
+
+static int cqhci_prep_tran_desc(struct mmc_request *mrq,
+ struct cqhci_host *cq_host, int tag)
+{
+ struct mmc_data *data = mrq->data;
+ int i, sg_count, len;
+ bool end = false;
+ bool dma64 = cq_host->dma64;
+ dma_addr_t addr;
+ u8 *desc;
+ struct scatterlist *sg;
+
+ sg_count = cqhci_dma_map(mrq->host, mrq);
+ if (sg_count < 0) {
+ pr_err("%s: %s: unable to map sg lists, %d\n",
+ mmc_hostname(mrq->host), __func__, sg_count);
+ return sg_count;
+ }
+
+ desc = get_trans_desc(cq_host, tag);
+
+ for_each_sg(data->sg, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((i+1) == sg_count)
+ end = true;
+ cqhci_set_tran_desc(desc, addr, len, end, dma64);
+ desc += cq_host->trans_desc_len;
+ }
+
+ return 0;
+}
+
+static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ u64 *task_desc = NULL;
+ u64 data = 0;
+ u8 resp_type;
+ u8 *desc;
+ __le64 *dataddr;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u8 timing;
+
+ if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
+ resp_type = 0x0;
+ timing = 0x1;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_R1B) {
+ resp_type = 0x3;
+ timing = 0x0;
+ } else {
+ resp_type = 0x2;
+ timing = 0x1;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
+ memset(task_desc, 0, cq_host->task_desc_len);
+ data |= (CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(1) |
+ CQHCI_QBAR(1) |
+ CQHCI_ACT(0x5) |
+ CQHCI_CMD_INDEX(mrq->cmd->opcode) |
+ CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
+ *task_desc |= data;
+ desc = (u8 *)task_desc;
+ pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
+ mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
+ dataddr = (__le64 __force *)(desc + 4);
+ dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
+}
+
+static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static inline int cqhci_tag(struct mmc_request *mrq)
+{
+ return mrq->cmd ? DCMD_SLOT : mrq->tag;
+}
+
+static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ int err = 0;
+ u64 data = 0;
+ u64 *task_desc = NULL;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+
+ if (!cq_host->enabled) {
+ pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
+ return -EINVAL;
+ }
+
+ /* First request after resume has to re-enable */
+ if (!cq_host->activated)
+ __cqhci_enable(cq_host);
+
+ if (!mmc->cqe_on) {
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+ mmc->cqe_on = true;
+ pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+ if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
+ pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ mmc_hostname(mmc));
+ }
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+ }
+
+ if (mrq->data) {
+ task_desc = (__le64 __force *)get_desc(cq_host, tag);
+ cqhci_prep_task_desc(mrq, &data, 1);
+ *task_desc = cpu_to_le64(data);
+ err = cqhci_prep_tran_desc(mrq, cq_host, tag);
+ if (err) {
+ pr_err("%s: cqhci: failed to setup tx desc: %d\n",
+ mmc_hostname(mmc), err);
+ return err;
+ }
+ } else {
+ cqhci_prep_dcmd_desc(mmc, mrq);
+ }
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+
+ if (cq_host->recovery_halt) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ cq_host->slot[tag].mrq = mrq;
+ cq_host->slot[tag].flags = 0;
+
+ cq_host->qcnt += 1;
+
+ cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+ if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
+ pr_debug("%s: cqhci: doorbell not set for tag %d\n",
+ mmc_hostname(mmc), tag);
+out_unlock:
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (err)
+ cqhci_post_req(mmc, mrq);
+
+ return err;
+}
+
+static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool notify)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->recovery_halt) {
+ cq_host->recovery_halt = true;
+ pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
+ wake_up(&cq_host->wait_queue);
+ if (notify && mrq->recovery_notifier)
+ mrq->recovery_notifier(mrq);
+ }
+}
+
+static unsigned int cqhci_error_flags(int error1, int error2)
+{
+ int error = error1 ? error1 : error2;
+
+ switch (error) {
+ case -EILSEQ:
+ return CQHCI_HOST_CRC;
+ case -ETIMEDOUT:
+ return CQHCI_HOST_TIMEOUT;
+ default:
+ return CQHCI_HOST_OTHER;
+ }
+}
+
+static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
+ int data_error)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot;
+ u32 terri;
+ int tag;
+
+ spin_lock(&cq_host->lock);
+
+ terri = cqhci_readl(cq_host, CQHCI_TERRI);
+
+ pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error, terri);
+
+ /* Forget about errors when recovery has already been triggered */
+ if (cq_host->recovery_halt)
+ goto out_unlock;
+
+ if (!cq_host->qcnt) {
+ WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error,
+ terri);
+ goto out_unlock;
+ }
+
+ if (CQHCI_TERRI_C_VALID(terri)) {
+ tag = CQHCI_TERRI_C_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(cmd_error, data_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (CQHCI_TERRI_D_VALID(terri)) {
+ tag = CQHCI_TERRI_D_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (!cq_host->recovery_halt) {
+ /*
+ * The only way to guarantee forward progress is to mark at
+ * least one task in error, so if none is indicated, pick one.
+ */
+ for (tag = 0; tag < NUM_SLOTS; tag++) {
+ slot = &cq_host->slot[tag];
+ if (!slot->mrq)
+ continue;
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ break;
+ }
+ }
+
+out_unlock:
+ spin_unlock(&cq_host->lock);
+}
+
+static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq) {
+ WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
+ mmc_hostname(mmc), tag);
+ return;
+ }
+
+ /* No completions allowed during recovery */
+ if (cq_host->recovery_halt) {
+ slot->flags |= CQHCI_COMPLETED;
+ return;
+ }
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blksz * data->blocks;
+ }
+
+ mmc_cqe_request_done(mmc, mrq);
+}
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error)
+{
+ u32 status;
+ unsigned long tag = 0, comp_status;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ status = cqhci_readl(cq_host, CQHCI_IS);
+ cqhci_writel(cq_host, status, CQHCI_IS);
+
+ pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
+
+ if ((status & CQHCI_IS_RED) || cmd_error || data_error)
+ cqhci_error_irq(mmc, status, cmd_error, data_error);
+
+ if (status & CQHCI_IS_TCC) {
+ /* read TCN and complete the request */
+ comp_status = cqhci_readl(cq_host, CQHCI_TCN);
+ cqhci_writel(cq_host, comp_status, CQHCI_TCN);
+ pr_debug("%s: cqhci: TCN: 0x%08lx\n",
+ mmc_hostname(mmc), comp_status);
+
+ spin_lock(&cq_host->lock);
+
+ for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: cqhci: completing tag %lu\n",
+ mmc_hostname(mmc), tag);
+ cqhci_finish_mrq(mmc, tag);
+ }
+
+ if (cq_host->waiting_for_idle && !cq_host->qcnt) {
+ cq_host->waiting_for_idle = false;
+ wake_up(&cq_host->wait_queue);
+ }
+
+ spin_unlock(&cq_host->lock);
+ }
+
+ if (status & CQHCI_IS_TCL)
+ wake_up(&cq_host->wait_queue);
+
+ if (status & CQHCI_IS_HAC)
+ wake_up(&cq_host->wait_queue);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cqhci_irq);
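
cqhci_irq() is meant to be called from the host controller driver's own interrupt handler once any command/data errors have been decoded. A hedged sketch of such a handler; every example_* name is a hypothetical helper, not an API introduced by this patch:

	static u32 example_read_int_status(struct mmc_host *mmc);	/* hypothetical */
	static void example_decode_errors(struct mmc_host *mmc, u32 intmask,
					  int *cmd_error, int *data_error);	/* hypothetical */
	static irqreturn_t example_legacy_irq(struct mmc_host *mmc, u32 intmask);	/* hypothetical */

	static irqreturn_t example_host_irq(int irq, void *dev_id)
	{
		struct mmc_host *mmc = dev_id;
		u32 intmask = example_read_int_status(mmc);
		int cmd_error = 0, data_error = 0;

		example_decode_errors(mmc, intmask, &cmd_error, &data_error);

		/* While the CQE is active, hand the interrupt to CQHCI */
		if (mmc->cqe_on)
			return cqhci_irq(mmc, intmask, cmd_error, data_error);

		return example_legacy_irq(mmc, intmask);
	}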
+
+static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
+{
+ unsigned long flags;
+ bool is_idle;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ is_idle = !cq_host->qcnt || cq_host->recovery_halt;
+ *ret = cq_host->recovery_halt ? -EBUSY : 0;
+ cq_host->waiting_for_idle = !is_idle;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ return is_idle;
+}
+
+static int cqhci_wait_for_idle(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int ret;
+
+ wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
+
+ return ret;
+}
+
+static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool *recovery_needed)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ unsigned long flags;
+ bool timed_out;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ timed_out = slot->mrq == mrq;
+ if (timed_out) {
+ slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
+ cqhci_recovery_needed(mmc, mrq, false);
+ *recovery_needed = cq_host->recovery_halt;
+ }
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (timed_out) {
+ pr_err("%s: cqhci: timeout for tag %d\n",
+ mmc_hostname(mmc), tag);
+ cqhci_dumpregs(cq_host);
+ }
+
+ return timed_out;
+}
+
+static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
+{
+ return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
+}
+
+static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_CLEAR_ALL_TASKS;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
+
+ return ret;
+}
+
+static bool cqhci_halted(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
+}
+
+static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ if (cqhci_halted(cq_host))
+ return true;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_HALT;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+}
+
+/*
+ * After halting we expect to be able to use the command line. We interpret the
+ * failure to halt to mean the data lines might still be in use (and the upper
+ * layers will need to send a STOP command), so we set the timeout based on a
+ * generous command timeout.
+ */
+#define CQHCI_START_HALT_TIMEOUT 5
+
+static void cqhci_recovery_start(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, true);
+
+ mmc->cqe_on = false;
+}
+
+static int cqhci_error_from_flags(unsigned int flags)
+{
+ if (!flags)
+ return 0;
+
+ /* CRC errors might indicate re-tuning so prefer to report that */
+ if (flags & CQHCI_HOST_CRC)
+ return -EILSEQ;
+
+ if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return -EIO;
+}
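
The mapping back from flags to an error code checks CRC first on purpose; a short worked example (scenario assumed):

	/*
	 * Assumed scenario: a host CRC error is followed by a block layer
	 * timeout on the same task, so
	 *
	 *   flags = CQHCI_HOST_CRC | CQHCI_EXTERNAL_TIMEOUT
	 *
	 * cqhci_error_from_flags(flags) == -EILSEQ, not -ETIMEDOUT, because
	 * a CRC error may indicate the need to re-tune.
	 */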
+
+static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
+{
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq)
+ return;
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ data->bytes_xfered = 0;
+ data->error = cqhci_error_from_flags(slot->flags);
+ } else {
+ mrq->cmd->error = cqhci_error_from_flags(slot->flags);
+ }
+
+ mmc_cqe_request_done(cq_host->mmc, mrq);
+}
+
+static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
+{
+ int i;
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ cqhci_recover_mrq(cq_host, i);
+}
+
+/*
+ * By now the command and data lines should be unused so there is no reason for
+ * CQHCI to take a long time to halt, but if it doesn't halt there could be
+ * problems clearing tasks, so be generous.
+ */
+#define CQHCI_FINISH_HALT_TIMEOUT 20
+
+/* CQHCI could be expected to clear its internal state pretty quickly */
+#define CQHCI_CLEAR_TIMEOUT 20
+
+static void cqhci_recovery_finish(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+ u32 cqcfg;
+ bool ok;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /*
+ * The specification contradicts itself: it says tasks cannot be
+ * cleared if CQHCI does not halt, but also that a CQHCI that does not
+ * halt should be disabled and re-enabled, while disabling before the
+ * tasks are cleared is not allowed. Have a go anyway.
+ */
+ if (!ok) {
+ pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ /* Be sure that there are no tasks */
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+ WARN_ON(!ok);
+ }
+
+ cqhci_recover_mrqs(cq_host);
+
+ WARN_ON(cq_host->qcnt);
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ cq_host->qcnt = 0;
+ cq_host->recovery_halt = false;
+ mmc->cqe_on = false;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ /* Ensure all writes are done before interrupts are re-enabled */
+ wmb();
+
+ cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
+}
+
+static const struct mmc_cqe_ops cqhci_cqe_ops = {
+ .cqe_enable = cqhci_enable,
+ .cqe_disable = cqhci_disable,
+ .cqe_request = cqhci_request,
+ .cqe_post_req = cqhci_post_req,
+ .cqe_off = cqhci_off,
+ .cqe_wait_for_idle = cqhci_wait_for_idle,
+ .cqe_timeout = cqhci_timeout,
+ .cqe_recovery_start = cqhci_recovery_start,
+ .cqe_recovery_finish = cqhci_recovery_finish,
+};
+
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
+{
+ struct cqhci_host *cq_host;
+ struct resource *cqhci_memres = NULL;
+
+ /* check and setup CMDQ interface */
+ cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cqhci_mem");
+ if (!cqhci_memres) {
+ dev_dbg(&pdev->dev, "CMDQ not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host)
+ return ERR_PTR(-ENOMEM);
+ cq_host->mmio = devm_ioremap(&pdev->dev,
+ cqhci_memres->start,
+ resource_size(cqhci_memres));
+ if (!cq_host->mmio) {
+ dev_err(&pdev->dev, "failed to remap cqhci regs\n");
+ return ERR_PTR(-EBUSY);
+ }
+ dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
+
+ return cq_host;
+}
+EXPORT_SYMBOL(cqhci_pltfm_init);
+
+static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
+{
+ return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
+}
+
+static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
+{
+ u32 ver = cqhci_readl(cq_host, CQHCI_VER);
+
+ return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
+}
+
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
+ bool dma64)
+{
+ int err;
+
+ cq_host->dma64 = dma64;
+ cq_host->mmc = mmc;
+ cq_host->mmc->cqe_private = cq_host;
+
+ cq_host->num_slots = NUM_SLOTS;
+ cq_host->dcmd_slot = DCMD_SLOT;
+
+ mmc->cqe_ops = &cqhci_cqe_ops;
+
+ mmc->cqe_qdepth = NUM_SLOTS;
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ mmc->cqe_qdepth -= 1;
+
+ cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
+ sizeof(*cq_host->slot), GFP_KERNEL);
+ if (!cq_host->slot) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ spin_lock_init(&cq_host->lock);
+
+ init_completion(&cq_host->halt_comp);
+ init_waitqueue_head(&cq_host->wait_queue);
+
+ pr_info("%s: CQHCI version %u.%02u\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host));
+
+ return 0;
+
+out_err:
+ pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host), err);
+ return err;
+}
+EXPORT_SYMBOL(cqhci_init);
+
+MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
+MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
new file mode 100644
index 000000000000..9e68286a07b4
--- /dev/null
+++ b/drivers/mmc/host/cqhci.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQHCI_H
+#define LINUX_MMC_CQHCI_H
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/irqreturn.h>
+#include <asm/io.h>
+
+/* registers */
+/* version */
+#define CQHCI_VER 0x00
+#define CQHCI_VER_MAJOR(x) (((x) & GENMASK(11, 8)) >> 8)
+#define CQHCI_VER_MINOR1(x) (((x) & GENMASK(7, 4)) >> 4)
+#define CQHCI_VER_MINOR2(x) ((x) & GENMASK(3, 0))
+
+/* capabilities */
+#define CQHCI_CAP 0x04
+/* configuration */
+#define CQHCI_CFG 0x08
+#define CQHCI_DCMD 0x00001000
+#define CQHCI_TASK_DESC_SZ 0x00000100
+#define CQHCI_ENABLE 0x00000001
+
+/* control */
+#define CQHCI_CTL 0x0C
+#define CQHCI_CLEAR_ALL_TASKS 0x00000100
+#define CQHCI_HALT 0x00000001
+
+/* interrupt status */
+#define CQHCI_IS 0x10
+#define CQHCI_IS_HAC BIT(0)
+#define CQHCI_IS_TCC BIT(1)
+#define CQHCI_IS_RED BIT(2)
+#define CQHCI_IS_TCL BIT(3)
+
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
+
+/* interrupt status enable */
+#define CQHCI_ISTE 0x14
+
+/* interrupt signal enable */
+#define CQHCI_ISGE 0x18
+
+/* interrupt coalescing */
+#define CQHCI_IC 0x1C
+#define CQHCI_IC_ENABLE BIT(31)
+#define CQHCI_IC_RESET BIT(16)
+#define CQHCI_IC_ICCTHWEN BIT(15)
+#define CQHCI_IC_ICCTH(x) (((x) & 0x1F) << 8)
+#define CQHCI_IC_ICTOVALWEN BIT(7)
+#define CQHCI_IC_ICTOVAL(x) ((x) & 0x7F)
+
+/* task list base address */
+#define CQHCI_TDLBA 0x20
+
+/* task list base address upper */
+#define CQHCI_TDLBAU 0x24
+
+/* door-bell */
+#define CQHCI_TDBR 0x28
+
+/* task completion notification */
+#define CQHCI_TCN 0x2C
+
+/* device queue status */
+#define CQHCI_DQS 0x30
+
+/* device pending tasks */
+#define CQHCI_DPT 0x34
+
+/* task clear */
+#define CQHCI_TCLR 0x38
+
+/* send status config 1 */
+#define CQHCI_SSC1 0x40
+
+/* send status config 2 */
+#define CQHCI_SSC2 0x44
+
+/* response for dcmd */
+#define CQHCI_CRDCT 0x48
+
+/* response mode error mask */
+#define CQHCI_RMEM 0x50
+
+/* task error info */
+#define CQHCI_TERRI 0x54
+
+#define CQHCI_TERRI_C_INDEX(x) ((x) & GENMASK(5, 0))
+#define CQHCI_TERRI_C_TASK(x) (((x) & GENMASK(12, 8)) >> 8)
+#define CQHCI_TERRI_C_VALID(x) ((x) & BIT(15))
+#define CQHCI_TERRI_D_INDEX(x) (((x) & GENMASK(21, 16)) >> 16)
+#define CQHCI_TERRI_D_TASK(x) (((x) & GENMASK(28, 24)) >> 24)
+#define CQHCI_TERRI_D_VALID(x) ((x) & BIT(31))
+
+/* command response index */
+#define CQHCI_CRI 0x58
+
+/* command response argument */
+#define CQHCI_CRA 0x5C
+
+#define CQHCI_INT_ALL 0xF
+#define CQHCI_IC_DEFAULT_ICCTH 31
+#define CQHCI_IC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define CQHCI_VALID(x) (((x) & 1) << 0)
+#define CQHCI_END(x) (((x) & 1) << 1)
+#define CQHCI_INT(x) (((x) & 1) << 2)
+#define CQHCI_ACT(x) (((x) & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define CQHCI_FORCED_PROG(x) (((x) & 1) << 6)
+#define CQHCI_CONTEXT(x) (((x) & 0xF) << 7)
+#define CQHCI_DATA_TAG(x) (((x) & 1) << 11)
+#define CQHCI_DATA_DIR(x) (((x) & 1) << 12)
+#define CQHCI_PRIORITY(x) (((x) & 1) << 13)
+#define CQHCI_QBAR(x) (((x) & 1) << 14)
+#define CQHCI_REL_WRITE(x) (((x) & 1) << 15)
+#define CQHCI_BLK_COUNT(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_BLK_ADDR(x) (((x) & 0xFFFFFFFF) << 32)
+
+/* direct command task descriptor fields */
+#define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16)
+#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22)
+#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23)
+
+/* transfer descriptor fields */
+#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32)
+#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0)
+
+struct cqhci_host_ops;
+struct mmc_host;
+struct cqhci_slot;
+
+struct cqhci_host {
+ const struct cqhci_host_ops *ops;
+ void __iomem *mmio;
+ struct mmc_host *mmc;
+
+ spinlock_t lock;
+
+ /* relative card address of device */
+ unsigned int rca;
+
+ /* 64 bit DMA */
+ bool dma64;
+ int num_slots;
+ int qcnt;
+
+ u32 dcmd_slot;
+ u32 caps;
+#define CQHCI_TASK_DESC_SZ_128 0x1
+
+ u32 quirks;
+#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+
+ bool enabled;
+ bool halted;
+ bool init_done;
+ bool activated;
+ bool waiting_for_idle;
+ bool recovery_halt;
+
+ size_t desc_size;
+ size_t data_size;
+
+ u8 *desc_base;
+
+ /* total descriptor size */
+ u8 slot_sz;
+
+ /* 64/128 bit depends on CQHCI_CFG */
+ u8 task_desc_len;
+
+ /* 64 bit on 32-bit arch, 128 bit on 64-bit */
+ u8 link_desc_len;
+
+ u8 *trans_desc_base;
+ /* same length as transfer descriptor */
+ u8 trans_desc_len;
+
+ dma_addr_t desc_dma_base;
+ dma_addr_t trans_desc_dma_base;
+
+ struct completion halt_comp;
+ wait_queue_head_t wait_queue;
+ struct cqhci_slot *slot;
+};
+
+struct cqhci_host_ops {
+ void (*dumpregs)(struct mmc_host *mmc);
+ void (*write_l)(struct cqhci_host *host, u32 val, int reg);
+ u32 (*read_l)(struct cqhci_host *host, int reg);
+ void (*enable)(struct mmc_host *mmc);
+ void (*disable)(struct mmc_host *mmc, bool recovery);
+};
+
+static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cqhci_readl(struct cqhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl_relaxed(host->mmio + reg);
+}
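
The read_l/write_l hooks let a glue driver interpose on every register access, for instance to apply a controller-specific address offset. A minimal hypothetical override (illustrative only; the offset is an assumed value):

	/* Hypothetical glue driver: CQHCI registers live at a fixed offset */
	#define EXAMPLE_CQE_REG_OFFSET	0x200	/* assumed value */

	static u32 example_cqhci_read_l(struct cqhci_host *host, int reg)
	{
		return readl_relaxed(host->mmio + EXAMPLE_CQE_REG_OFFSET + reg);
	}

	static void example_cqhci_write_l(struct cqhci_host *host, u32 val, int reg)
	{
		writel_relaxed(val, host->mmio + EXAMPLE_CQE_REG_OFFSET + reg);
	}

	static const struct cqhci_host_ops example_cqhci_ops = {
		.read_l  = example_cqhci_read_l,
		.write_l = example_cqhci_write_l,
	};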
+
+struct platform_device;
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error);
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
+int cqhci_suspend(struct mmc_host *mmc);
+int cqhci_resume(struct mmc_host *mmc);
+
+#endif
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 351330dfb954..8e363174f9d6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -174,7 +174,7 @@ module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
"Maximum polling loop count. Default = 32");
-static unsigned __initdata use_dma = 1;
+static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
@@ -496,8 +496,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
return ret;
}
-static void __init_or_module
-davinci_release_dma_channels(struct mmc_davinci_host *host)
+static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
if (!host->use_dma)
return;
@@ -506,7 +505,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
dma_release_channel(host->dma_rx);
}
-static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
@@ -1201,7 +1200,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
return 0;
}
-static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+static int davinci_mmcsd_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
@@ -1254,8 +1253,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
pdev->id_entry = match->data;
ret = mmc_of_parse(mmc);
if (ret) {
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "could not parse of data: %d\n", ret);
goto parse_fail;
}
} else {
@@ -1414,11 +1414,12 @@ static struct platform_driver davinci_mmcsd_driver = {
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
+ .probe = davinci_mmcsd_probe,
.remove = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
};
-module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
+module_platform_driver(davinci_mmcsd_driver);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
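Dropping the __init/__initdata annotations is what makes the switch from module_platform_driver_probe() to module_platform_driver() safe: the probe routine can now run on any later bind (for instance after deferred probing), so it must not live in memory that is discarded after boot. As a reminder, module_platform_driver() expands to roughly the following (simplified sketch of the macro in include/linux/platform_device.h):

static int __init davinci_mmcsd_driver_init(void)
{
	return platform_driver_register(&davinci_mmcsd_driver);
}
module_init(davinci_mmcsd_driver_init);

static void __exit davinci_mmcsd_driver_exit(void)
{
	platform_driver_unregister(&davinci_mmcsd_driver);
}
module_exit(davinci_mmcsd_driver_exit);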
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e0862d3f65b3..32a6a228cd12 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1208,7 +1208,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto free_host;
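platform_get_irq() returns a negative errno on failure and never a usable IRQ 0, so `irq <= 0` is the robust test; the old `!irq` check let error codes through as if they were valid interrupt numbers. The general shape of the corrected pattern (a sketch, not driver code):

	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0)	/* negative errno, or 0, which is not a valid IRQ here */
		return irq < 0 ? irq : -EINVAL;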
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e8a1bb1ae694..70b0df8b9c78 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -82,6 +82,10 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @mmcimask1: true if the variant has an MMCIMASK1 register.
+ * @start_err: bitmask identifying the STARTBITERR bit inside the MMCISTATUS
+ * register.
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside the MMCIPOWER
+ * register.
*/
struct variant_data {
unsigned int clkreg;
@@ -111,6 +115,9 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool mmcimask1;
+ u32 start_err;
+ u32 opendrain;
};
static struct variant_data variant_arm = {
@@ -120,6 +127,9 @@ static struct variant_data variant_arm = {
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo = {
@@ -128,6 +138,9 @@ static struct variant_data variant_arm_extended_fifo = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
@@ -137,6 +150,9 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_u300 = {
@@ -152,6 +168,9 @@ static struct variant_data variant_u300 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_nomadik = {
@@ -168,6 +187,9 @@ static struct variant_data variant_nomadik = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500 = {
@@ -190,6 +212,9 @@ static struct variant_data variant_ux500 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500v2 = {
@@ -214,6 +239,26 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+};
+
+static struct variant_data variant_stm32 = {
+ .fifosize = 32 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 48000000,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_qcom = {
@@ -232,6 +277,9 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
/* Busy detection for the ST Micro variant */
@@ -396,6 +444,7 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
void __iomem *base = host->base;
+ struct variant_data *variant = host->variant;
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
@@ -406,7 +455,10 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
writel(mask0, base + MMCIMASK0);
}
- writel(mask, base + MMCIMASK1);
+ if (variant->mmcimask1)
+ writel(mask, base + MMCIMASK1);
+
+ host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
@@ -921,8 +973,9 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
return;
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
- MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ host->variant->start_err |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -1286,7 +1339,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
status = readl(host->base + MMCISTATUS);
if (host->singleirq) {
- if (status & readl(host->base + MMCIMASK1))
+ if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
status &= ~MCI_IRQ1MASK;
@@ -1429,16 +1482,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
~MCI_ST_DATA2DIREN);
}
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
- if (host->hw_designer != AMBA_VENDOR_ST)
- pwr |= MCI_ROD;
- else {
- /*
- * The ST Micro variant use the ROD bit for something
- * else and only has OD (Open Drain).
- */
- pwr |= MCI_OD;
- }
+ if (variant->opendrain) {
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= variant->opendrain;
+ } else {
+ /*
+ * If the variant cannot configure the pads on its own, we expect
+ * the pinctrl to be able to do that for us
+ */
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pinctrl_select_state(host->pinctrl, host->pins_opendrain);
+ else
+ pinctrl_select_state(host->pinctrl, host->pins_default);
}
/*
@@ -1583,6 +1638,35 @@ static int mmci_probe(struct amba_device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
+ /*
+ * Some variants (e.g. STM32) don't have an opendrain bit; the pins
+ * can nevertheless be set accordingly using pinctrl
+ */
+ if (!variant->opendrain) {
+ host->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ dev_err(&dev->dev, "failed to get pinctrl");
+ ret = PTR_ERR(host->pinctrl);
+ goto host_free;
+ }
+
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(host->pins_default)) {
+ dev_err(mmc_dev(mmc), "Can't select default pins\n");
+ ret = PTR_ERR(host->pins_default);
+ goto host_free;
+ }
+
+ host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
+ MMCI_PINCTRL_STATE_OPENDRAIN);
+ if (IS_ERR(host->pins_opendrain)) {
+ dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
+ ret = PTR_ERR(host->pins_opendrain);
+ goto host_free;
+ }
+ }
+
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
@@ -1729,7 +1813,10 @@ static int mmci_probe(struct amba_device *dev,
spin_lock_init(&host->lock);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
+
writel(0xfff, host->base + MMCICLEAR);
/*
@@ -1809,6 +1896,7 @@ static int mmci_remove(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
/*
* Undo pm_runtime_put() in probe. We use the _sync
@@ -1819,7 +1907,9 @@ static int mmci_remove(struct amba_device *dev)
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
@@ -1951,6 +2041,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_ux500v2,
},
+ {
+ .id = 0x00880180,
+ .mask = 0x00ffffff,
+ .data = &variant_stm32,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4a8bef1aac8f..f91cdf7f6dae 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -192,6 +192,8 @@
#define NR_SG 128
+#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
+
struct clk;
struct variant_data;
struct dma_chan;
@@ -223,9 +225,13 @@ struct mmci_host {
u32 clk_reg;
u32 datactrl_reg;
u32 busy_status;
+ u32 mask1_reg;
bool vqmmc_enabled;
struct mmci_platform_data *plat;
struct variant_data *variant;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_opendrain;
u8 hw_designer;
u8 hw_revision:4;
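Two things fall out of the mmci.h additions: mask1_reg caches the value last programmed into (or intended for) MMCIMASK1, so the IRQ path no longer has to read a register that may not exist on a given variant, and the pinctrl handles back the open-drain setup for variants such as STM32. A minimal sketch of that pinctrl fallback, assuming the "opendrain" state name from MMCI_PINCTRL_STATE_OPENDRAIN (foo_* name is hypothetical):

static int foo_select_opendrain(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get(dev);
	struct pinctrl_state *od;

	if (IS_ERR(p))
		return PTR_ERR(p);

	od = pinctrl_lookup_state(p, MMCI_PINCTRL_STATE_OPENDRAIN);
	if (IS_ERR(od))
		return PTR_ERR(od);

	/* emulate open-drain through pad configuration */
	return pinctrl_select_state(p, od);
}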
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index b9dfea5d8193..f13f798d8506 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -35,6 +35,28 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct tmio_mmc_dma {
+ enum dma_slave_buswidth dma_buswidth;
+ bool (*filter)(struct dma_chan *chan, void *arg);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+ struct completion dma_dataend;
+ struct tasklet_struct dma_complete;
+};
+
+struct renesas_sdhi {
+ struct clk *clk;
+ struct clk *clk_cd;
+ struct tmio_mmc_data mmc_data;
+ struct tmio_mmc_dma dma_priv;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default, *pins_uhs;
+ void __iomem *scc_ctl;
+ u32 scc_tappos;
+};
+
+#define host_to_priv(host) \
+ container_of((host)->pdata, struct renesas_sdhi, mmc_data)
+
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops);
int renesas_sdhi_remove(struct platform_device *pdev);
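host_to_priv() is the usual container_of() idiom: struct renesas_sdhi embeds a struct tmio_mmc_data, and since host->pdata points at that embedded member, the macro recovers the enclosing private structure without any extra back-pointer. In generic form (illustrative sketch):

#include <linux/kernel.h>

struct inner { int x; };

struct outer {
	int y;
	struct inner member;
};

static struct outer *to_outer(struct inner *p)
{
	/* walk back from the member to the structure that contains it */
	return container_of(p, struct outer, member);
}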
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 157e1d9e7725..80943fa07db6 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -47,19 +47,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-#define host_to_priv(host) \
- container_of((host)->pdata, struct renesas_sdhi, mmc_data)
-
-struct renesas_sdhi {
- struct clk *clk;
- struct clk *clk_cd;
- struct tmio_mmc_data mmc_data;
- struct tmio_mmc_dma dma_priv;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default, *pins_uhs;
- void __iomem *scc_ctl;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -281,7 +268,7 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
/* Read TAPNUM */
return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
@@ -498,7 +485,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- goto eprobe;
+ return ret;
}
/*
@@ -524,11 +511,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
"state_uhs");
}
- host = tmio_mmc_host_alloc(pdev);
- if (!host) {
- ret = -ENOMEM;
- goto eprobe;
- }
+ host = tmio_mmc_host_alloc(pdev, mmc_data);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
@@ -542,18 +527,18 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->bus_shift = of_data->bus_shift;
}
- host->dma = dma_priv;
host->write16_hook = renesas_sdhi_write16_hook;
host->clk_enable = renesas_sdhi_clk_enable;
host->clk_update = renesas_sdhi_clk_update;
host->clk_disable = renesas_sdhi_clk_disable;
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
/* SDR speeds are only available on Gen2+ */
if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
- host->card_busy = renesas_sdhi_card_busy;
- host->start_signal_voltage_switch =
+ host->ops.card_busy = renesas_sdhi_card_busy;
+ host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
}
@@ -587,10 +572,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- ret = tmio_mmc_host_probe(host, mmc_data, dma_ops);
- if (ret < 0)
+ ret = renesas_sdhi_clk_enable(host);
+ if (ret)
goto efree;
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
@@ -607,7 +596,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
- host->scc_tappos = taps->tap;
+ priv->scc_tappos = taps->tap;
hit = true;
break;
}
@@ -651,19 +640,21 @@ int renesas_sdhi_probe(struct platform_device *pdev,
eirq:
tmio_mmc_host_remove(host);
+edisclk:
+ renesas_sdhi_clk_disable(host);
efree:
tmio_mmc_host_free(host);
-eprobe:
+
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
int renesas_sdhi_remove(struct platform_device *pdev)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
+ renesas_sdhi_clk_disable(host);
return 0;
}
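The probe rework gives the error path the usual mirror-image shape: each resource acquired on the way in gets an unwind label on the way out, in reverse order, and remove() now repeats the same teardown (tmio_mmc_host_remove() plus renesas_sdhi_clk_disable()). Condensed from the patch:

	ret = renesas_sdhi_clk_enable(host);
	if (ret)
		goto efree;

	ret = tmio_mmc_host_probe(host);
	if (ret < 0)
		goto edisclk;	/* undo the clock enable above */

edisclk:
	renesas_sdhi_clk_disable(host);
efree:
	tmio_mmc_host_free(host);
	return ret;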
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 41cbe84c1d18..7c03cfead6f9 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -103,6 +103,8 @@ renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
@@ -110,8 +112,8 @@ renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1,
INFO1_CLEAR);
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void
@@ -130,7 +132,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) {
- tasklet_schedule(&host->dma_complete);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ tasklet_schedule(&priv->dma_priv.dma_complete);
}
static void
@@ -220,10 +224,12 @@ static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ /* Set each channel pointer to a dummy non-zero value to mark DMA as enabled */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&host->dma_complete,
+ tasklet_init(&priv->dma_priv.dma_complete,
renesas_sdhi_internal_dmac_complete_tasklet_fn,
(unsigned long)host);
tasklet_init(&host->dma_issue,
@@ -255,6 +261,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ .soc_id = "r8a7795", .revision = "ES2.0" },
{ .soc_id = "r8a7796", .revision = "ES1.0" },
+ { .soc_id = "r8a77995", .revision = "ES1.0" },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 9ab10436e4b8..82d757c480b2 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -117,11 +117,13 @@ MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
@@ -138,12 +140,15 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
- complete(&host->dma_dataend);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ complete(&priv->dma_priv.dma_dataend);
}
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
struct tmio_mmc_host *host = arg;
+ struct renesas_sdhi *priv = host_to_priv(host);
spin_lock_irq(&host->lock);
@@ -161,7 +166,7 @@ static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
spin_unlock_irq(&host->lock);
- wait_for_completion(&host->dma_dataend);
+ wait_for_completion(&priv->dma_priv.dma_dataend);
spin_lock_irq(&host->lock);
tmio_mmc_do_data_irq(host);
@@ -171,6 +176,7 @@ out:
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
@@ -214,7 +220,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -245,6 +251,7 @@ pio:
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
@@ -293,7 +300,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -341,7 +348,7 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
spin_lock_irq(&host->lock);
- if (host && host->data) {
+ if (host->data) {
if (host->data->flags & MMC_DATA_READ)
chan = host->chan_rx;
else
@@ -359,9 +366,11 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* We can only either use DMA for both Tx and Rx or not use it at all */
- if (!host->dma || (!host->pdev->dev.of_node &&
- (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
+ if (!host->pdev->dev.of_node &&
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -378,7 +387,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_tx,
+ priv->dma_priv.filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -389,7 +398,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start +
(CTL_SD_DATA_PORT << host->bus_shift);
- cfg.dst_addr_width = host->dma->dma_buswidth;
+ cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.dst_addr_width)
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_addr = 0;
@@ -398,7 +407,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_rx,
+ priv->dma_priv.filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -408,7 +417,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
- cfg.src_addr_width = host->dma->dma_buswidth;
+ cfg.src_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.src_addr_width)
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
@@ -420,7 +429,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
if (!host->bounce_buf)
goto ebouncebuf;
- init_completion(&host->dma_dataend);
+ init_completion(&priv->dma_priv.dma_dataend);
tasklet_init(&host->dma_issue,
renesas_sdhi_sys_dmac_issue_tasklet_fn,
(unsigned long)host);
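All of the sys-DMAC bookkeeping (completion, filter, bus width) now lives in priv->dma_priv rather than in struct tmio_mmc_host, but the completion handshake itself is unchanged. Its shape, shown as hypothetical helpers (the foo_* names are illustrative):

static void foo_start_transfer(struct renesas_sdhi *priv)
{
	/* re-arm before each descriptor is submitted */
	reinit_completion(&priv->dma_priv.dma_dataend);
}

static void foo_dataend_irq(struct renesas_sdhi *priv)
{
	/* the controller signalled data end: release the waiter */
	complete(&priv->dma_priv.dma_dataend);
}

static void foo_dma_callback(struct renesas_sdhi *priv)
{
	/* DMA engine callback: wait for data end before completing the request */
	wait_for_completion(&priv->dma_priv.dma_dataend);
}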
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 0848dc0f882e..30bd8081307e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -30,7 +30,7 @@
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/card.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_sdmmc {
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 76da1687ab37..78422079ecfa 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -31,7 +31,7 @@
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <asm/unaligned.h>
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 555c7f133eb8..f77493604312 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1660,7 +1660,7 @@ static int s3cmci_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq == 0) {
+ if (host->irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto probe_iounmap;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b988997a1e80..4065da58789d 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -76,6 +76,7 @@ struct sdhci_acpi_slot {
size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
+ int (*setup_host)(struct platform_device *pdev);
};
struct sdhci_acpi_host {
@@ -96,14 +97,21 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
return c->slot && (c->slot->flags & flag);
}
+#define INTEL_DSM_HS_CAPS_SDR25 BIT(0)
+#define INTEL_DSM_HS_CAPS_DDR50 BIT(1)
+#define INTEL_DSM_HS_CAPS_SDR50 BIT(2)
+#define INTEL_DSM_HS_CAPS_SDR104 BIT(3)
+
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_V33_SWITCH = 4,
+ INTEL_DSM_HS_CAPS = 8,
};
struct intel_host {
u32 dsm_fns;
+ u32 hs_caps;
};
static const guid_t intel_dsm_guid =
@@ -152,6 +160,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
{
int err;
+ intel_host->hs_caps = ~0;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
@@ -161,6 +171,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
pr_debug("%s: DSM function mask %#x\n",
mmc_hostname(mmc), intel_host->dsm_fns);
+
+ intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -398,6 +410,26 @@ static int intel_probe_slot(struct platform_device *pdev, const char *hid,
return 0;
}
+static int intel_setup_host(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
+
+ return 0;
+}
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -409,6 +441,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -421,6 +454,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -432,6 +466,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -446,6 +481,83 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.caps = MMC_CAP_NONREMOVABLE,
};
+/* AMD SDHCI reset DLL register */
+#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
+
+static int amd_select_drive_strength(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type)
+{
+ return MMC_SET_DRIVER_TYPE_A;
+}
+
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+{
+ /* AMD platforms require the DLL to be programmed */
+ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
+ usleep_range(10, 20);
+ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+}
+
+/*
+ * On AMD platforms the controller requires the tuning bit to be
+ * disabled first to drop from HS200 to HS mode; it is enabled
+ * again later to tune up to HS400 mode.
+ */
+static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned int old_timing = host->timing;
+
+ sdhci_set_ios(mmc, ios);
+ if (old_timing == MMC_TIMING_MMC_HS200 &&
+ ios->timing == MMC_TIMING_MMC_HS)
+ sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
+ if (old_timing != MMC_TIMING_MMC_HS400 &&
+ ios->timing == MMC_TIMING_MMC_HS400) {
+ sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
+ sdhci_acpi_amd_hs400_dll(host);
+ }
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+ .ops = &sdhci_acpi_ops_amd,
+};
+
+static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host = c->host;
+
+ sdhci_read_caps(host);
+ if (host->caps1 & SDHCI_SUPPORT_DDR50)
+ host->mmc->caps = MMC_CAP_1_8V_DDR;
+
+ if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
+ (host->mmc->caps & MMC_CAP_1_8V_DDR))
+ host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+
+ host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
+ host->mmc_host_ops.set_ios = amd_set_ios;
+ return 0;
+}
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+};
+
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
@@ -469,6 +581,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "PNP0D40" },
{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
+ { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
{ },
};
@@ -485,6 +598,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "PNP0D40" },
{ "QCOM8051" },
{ "QCOM8052" },
+ { "AMDI0040" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
@@ -566,6 +680,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->hw_name = "ACPI";
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ err = -EINVAL;
+ goto err_free;
+ }
host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
resource_size(iomem));
@@ -609,10 +727,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
}
}
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_free;
+ if (c->slot && c->slot->setup_host) {
+ err = c->slot->setup_host(pdev);
+ if (err)
+ goto err_cleanup;
+ }
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_cleanup;
+
if (c->use_runtime_pm) {
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
@@ -625,6 +753,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return 0;
+err_cleanup:
+ sdhci_cleanup_host(c->host);
err_free:
sdhci_free_host(c->host);
return err;
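The probe split is what makes the new setup_host() hook useful: sdhci_setup_host() reads the capabilities, the slot hook can then trim them (here intel_setup_host() masks the UHS modes the _DSM reports as unsupported), and only afterwards does __sdhci_add_host() expose the host to the MMC core. Condensed flow (foo_ name is illustrative; error handling as in the patch):

static int foo_add_host(struct platform_device *pdev, struct sdhci_acpi_host *c)
{
	int err;

	err = sdhci_setup_host(c->host);	/* read caps, allocate resources */
	if (err)
		return err;

	if (c->slot && c->slot->setup_host) {
		err = c->slot->setup_host(pdev);	/* adjust caps before add */
		if (err)
			goto cleanup;
	}

	err = __sdhci_add_host(c->host);	/* register with the MMC core */
	if (err)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(c->host);
	return err;
}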
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 8b941f814472..cd2b5f643a15 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -193,6 +193,7 @@ struct pltfm_imx_data {
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
+ unsigned int actual_clock;
enum {
NO_CMD_PENDING, /* no multiblock command pending */
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
@@ -1403,11 +1404,15 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
int ret;
ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
if (!sdhci_sdio_irq_enabled(host)) {
+ imx_data->actual_clock = host->mmc->actual_clock;
+ esdhc_pltfm_set_clock(host, 0);
clk_disable_unprepare(imx_data->clk_per);
clk_disable_unprepare(imx_data->clk_ipg);
}
@@ -1423,31 +1428,34 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int err;
+ err = clk_prepare_enable(imx_data->clk_ahb);
+ if (err)
+ return err;
+
if (!sdhci_sdio_irq_enabled(host)) {
err = clk_prepare_enable(imx_data->clk_per);
if (err)
- return err;
+ goto disable_ahb_clk;
err = clk_prepare_enable(imx_data->clk_ipg);
if (err)
goto disable_per_clk;
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
}
- err = clk_prepare_enable(imx_data->clk_ahb);
- if (err)
- goto disable_ipg_clk;
+
err = sdhci_runtime_resume_host(host);
if (err)
- goto disable_ahb_clk;
+ goto disable_ipg_clk;
return 0;
-disable_ahb_clk:
- clk_disable_unprepare(imx_data->clk_ahb);
disable_ipg_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_per);
+disable_ahb_clk:
+ clk_disable_unprepare(imx_data->clk_ahb);
return err;
}
#endif
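The runtime-PM fix reorders the clocks around the constraint that register access needs the AHB (bus) clock: on suspend the card clock is parked at 0 and the per/ipg clocks gated, while on resume the AHB clock comes up first and is the last thing undone on failure. The saved actual_clock is what lets resume restore the card clock exactly. Skeleton of the resume ordering (labels as in the patch, bodies elided):

	err = clk_prepare_enable(imx_data->clk_ahb);	/* bus clock first */
	if (err)
		return err;

	esdhc_pltfm_set_clock(host, imx_data->actual_clock);	/* restore card clock */

disable_ahb_clk:
	clk_disable_unprepare(imx_data->clk_ahb);	/* undone last */
	return err;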
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 0720ea717011..c33a5f7393bd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -25,11 +25,13 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include "sdhci-pltfm.h"
#include <linux/of.h>
-#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#include "cqhci.h"
+#include "sdhci-pltfm.h"
+#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
@@ -90,6 +92,7 @@ struct sdhci_arasan_data {
struct phy *phy;
bool is_phy_on;
+ bool has_cqe;
struct clk_hw sdcardclk_hw;
struct clk *sdcardclk;
@@ -262,6 +265,17 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -269,6 +283,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
};
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
@@ -278,6 +293,62 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_arasan_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_arasan_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_arasan_cqhci_ops = {
+ .enable = sdhci_arasan_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_arasan_dumpregs,
+};
+
+static const struct sdhci_ops sdhci_arasan_cqe_ops = {
+ .set_clock = sdhci_arasan_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_arasan_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
+ .irq = sdhci_arasan_cqhci_irq,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -297,6 +368,12 @@ static int sdhci_arasan_suspend(struct device *dev)
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
+ if (sdhci_arasan->has_cqe) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -353,7 +430,16 @@ static int sdhci_arasan_resume(struct device *dev)
sdhci_arasan->is_phy_on = true;
}
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret) {
+ dev_err(dev, "Cannot resume host.\n");
+ return ret;
+ }
+
+ if (sdhci_arasan->has_cqe)
+ return cqhci_resume(host->mmc);
+
+ return 0;
}
#endif /* ! CONFIG_PM_SLEEP */
@@ -556,6 +642,49 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
+{
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!sdhci_arasan->has_cqe)
+ return sdhci_add_host(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_arasan_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
@@ -566,9 +695,15 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
+ const struct sdhci_pltfm_data *pdata;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1"))
+ pdata = &sdhci_arasan_cqe_pdata;
+ else
+ pdata = &sdhci_arasan_pdata;
+
+ host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan));
- host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
- sizeof(*sdhci_arasan));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -663,9 +798,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan_hs400_enhanced_strobe;
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_arasan_voltage_switch;
+ sdhci_arasan->has_cqe = true;
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
}
- ret = sdhci_add_host(host);
+ ret = sdhci_arasan_add_host(sdhci_arasan);
if (ret)
goto err_add_host;
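Both this Arasan glue and the Intel GLK code further below route interrupts through the same pattern: sdhci_cqe_irq() decides whether the interrupt belongs to the command queue engine, and only then is it forwarded to cqhci_irq(). Condensed from sdhci_arasan_cqhci_irq() above (foo_ name is illustrative):

static u32 foo_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	/* not a CQE interrupt: hand it back for normal sdhci handling */
	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
	return 0;	/* fully consumed */
}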
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1f424374bbbb..4ffa6b173a21 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
+ u32 val;
+
sdhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (mask & SDHCI_RESET_ALL) {
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+ }
}
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
diff --git a/drivers/mmc/host/sdhci-pci-arasan.c b/drivers/mmc/host/sdhci-pci-arasan.c
new file mode 100644
index 000000000000..499f3205ec5c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-arasan.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sdhci-pci-arasan.c - Driver for Arasan PCI Controller with
+ * integrated phy.
+ *
+ * Copyright (C) 2017 Arasan Chip Systems Inc.
+ *
+ * Author: Atul Garg <agarg@arasan.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+/* Extra registers for Arasan SD/SDIO/MMC Host Controller with PHY */
+#define PHY_ADDR_REG 0x300
+#define PHY_DAT_REG 0x304
+
+#define PHY_WRITE BIT(8)
+#define PHY_BUSY BIT(9)
+#define DATA_MASK 0xFF
+
+/* PHY Specific Registers */
+#define DLL_STATUS 0x00
+#define IPAD_CTRL1 0x01
+#define IPAD_CTRL2 0x02
+#define IPAD_STS 0x03
+#define IOREN_CTRL1 0x06
+#define IOREN_CTRL2 0x07
+#define IOPU_CTRL1 0x08
+#define IOPU_CTRL2 0x09
+#define ITAP_DELAY 0x0C
+#define OTAP_DELAY 0x0D
+#define STRB_SEL 0x0E
+#define CLKBUF_SEL 0x0F
+#define MODE_CTRL 0x11
+#define DLL_TRIM 0x12
+#define CMD_CTRL 0x20
+#define DATA_CTRL 0x21
+#define STRB_CTRL 0x22
+#define CLK_CTRL 0x23
+#define PHY_CTRL 0x24
+
+#define DLL_ENBL BIT(3)
+#define RTRIM_EN BIT(1)
+#define PDB_ENBL BIT(1)
+#define RETB_ENBL BIT(6)
+#define ODEN_CMD BIT(1)
+#define ODEN_DAT 0xFF
+#define REN_STRB BIT(0)
+#define REN_CMND BIT(1)
+#define REN_DATA 0xFF
+#define PU_CMD BIT(1)
+#define PU_DAT 0xFF
+#define ITAPDLY_EN BIT(0)
+#define OTAPDLY_EN BIT(0)
+#define OD_REL_CMD BIT(1)
+#define OD_REL_DAT 0xFF
+#define DLLTRM_ICP 0x8
+#define PDB_CMND BIT(0)
+#define PDB_DATA 0xFF
+#define PDB_STRB BIT(0)
+#define PDB_CLOCK BIT(0)
+#define CALDONE_MASK 0x10
+#define DLL_RDY_MASK 0x10
+#define MAX_CLK_BUF 0x7
+
+/* Mode Controls */
+#define ENHSTRB_MODE BIT(0)
+#define HS400_MODE BIT(1)
+#define LEGACY_MODE BIT(2)
+#define DDR50_MODE BIT(3)
+
+/*
+ * The controller has no specific bits for HS200/HS;
+ * BIT(4) and BIT(5) are used for software programming.
+ */
+#define HS200_MODE BIT(4)
+#define HISPD_MODE BIT(5)
+
+#define OTAPDLY(x) (((x) << 1) | OTAPDLY_EN)
+#define ITAPDLY(x) (((x) << 1) | ITAPDLY_EN)
+#define FREQSEL(x) (((x) << 5) | DLL_ENBL)
+#define IOPAD(x, y) ((x) | ((y) << 2))
+
+/* Arasan private data */
+struct arasan_host {
+ u32 chg_clk;
+};
+
+static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ val = sdhci_readw(host, PHY_ADDR_REG);
+ if (!(val & mask))
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+static int arasan_phy_write(struct sdhci_host *host, u8 data, u8 offset)
+{
+ sdhci_writew(host, data, PHY_DAT_REG);
+ sdhci_writew(host, (PHY_WRITE | offset), PHY_ADDR_REG);
+ return arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+}
+
+static int arasan_phy_read(struct sdhci_host *host, u8 offset, u8 *data)
+{
+ int ret;
+
+ sdhci_writew(host, 0, PHY_DAT_REG);
+ sdhci_writew(host, offset, PHY_ADDR_REG);
+ ret = arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+
+ /* Keep only the valid data bits */
+ *data = sdhci_readw(host, PHY_DAT_REG) & DATA_MASK;
+ return ret;
+}
+
+static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ int ret;
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ ret = arasan_phy_read(host, offset, &val);
+ if (ret)
+ return -EBUSY;
+ else if (val & mask)
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+/* Initialize the Arasan PHY */
+static int arasan_phy_init(struct sdhci_host *host)
+{
+ int ret;
+ u8 val;
+
+ /* Program IOPADs and wait for calibration to be done */
+ if (arasan_phy_read(host, IPAD_CTRL1, &val) ||
+ arasan_phy_write(host, val | RETB_ENBL | PDB_ENBL, IPAD_CTRL1) ||
+ arasan_phy_read(host, IPAD_CTRL2, &val) ||
+ arasan_phy_write(host, val | RTRIM_EN, IPAD_CTRL2))
+ return -EBUSY;
+ ret = arasan_phy_sts_poll(host, IPAD_STS, CALDONE_MASK);
+ if (ret)
+ return -EBUSY;
+
+ /* Program CMD/Data lines */
+ if (arasan_phy_read(host, IOREN_CTRL1, &val) ||
+ arasan_phy_write(host, val | REN_CMND | REN_STRB, IOREN_CTRL1) ||
+ arasan_phy_read(host, IOPU_CTRL1, &val) ||
+ arasan_phy_write(host, val | PU_CMD, IOPU_CTRL1) ||
+ arasan_phy_read(host, CMD_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CMND, CMD_CTRL) ||
+ arasan_phy_read(host, IOREN_CTRL2, &val) ||
+ arasan_phy_write(host, val | REN_DATA, IOREN_CTRL2) ||
+ arasan_phy_read(host, IOPU_CTRL2, &val) ||
+ arasan_phy_write(host, val | PU_DAT, IOPU_CTRL2) ||
+ arasan_phy_read(host, DATA_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_DATA, DATA_CTRL) ||
+ arasan_phy_read(host, STRB_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_STRB, STRB_CTRL) ||
+ arasan_phy_read(host, CLK_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CLOCK, CLK_CTRL) ||
+ arasan_phy_read(host, CLKBUF_SEL, &val) ||
+ arasan_phy_write(host, val | MAX_CLK_BUF, CLKBUF_SEL) ||
+ arasan_phy_write(host, LEGACY_MODE, MODE_CTRL))
+ return -EBUSY;
+ return 0;
+}
+
+/* Set Arasan PHY for different modes */
+static int arasan_phy_set(struct sdhci_host *host, u8 mode, u8 otap,
+ u8 drv_type, u8 itap, u8 trim, u8 clk)
+{
+ u8 val;
+ int ret;
+
+ if (mode == HISPD_MODE || mode == HS200_MODE)
+ ret = arasan_phy_write(host, 0x0, MODE_CTRL);
+ else
+ ret = arasan_phy_write(host, mode, MODE_CTRL);
+ if (ret)
+ return ret;
+ if (mode == HS400_MODE || mode == HS200_MODE) {
+ ret = arasan_phy_read(host, IPAD_CTRL1, &val);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, IOPAD(val, drv_type), IPAD_CTRL1);
+ if (ret)
+ return ret;
+ }
+ if (mode == LEGACY_MODE) {
+ ret = arasan_phy_write(host, 0x0, OTAP_DELAY);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ } else {
+ ret = arasan_phy_write(host, OTAPDLY(otap), OTAP_DELAY);
+ if (ret)
+ return ret;
+ if (mode != HS200_MODE)
+ ret = arasan_phy_write(host, ITAPDLY(itap), ITAP_DELAY);
+ else
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ }
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, trim, DLL_TRIM);
+ if (ret)
+ return ret;
+ }
+ ret = arasan_phy_write(host, 0, DLL_STATUS);
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, FREQSEL(clk), DLL_STATUS);
+ if (ret)
+ return ret;
+ ret = arasan_phy_sts_poll(host, DLL_STATUS, DLL_RDY_MASK);
+ if (ret)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int arasan_select_phy_clock(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct arasan_host *arasan_host = sdhci_pci_priv(slot);
+ u8 clk;
+
+ if (arasan_host->chg_clk == host->mmc->ios.clock)
+ return 0;
+
+ arasan_host->chg_clk = host->mmc->ios.clock;
+ if (host->mmc->ios.clock == 200000000)
+ clk = 0x0;
+ else if (host->mmc->ios.clock == 100000000)
+ clk = 0x2;
+ else if (host->mmc->ios.clock == 50000000)
+ clk = 0x1;
+ else
+ clk = 0x0;
+
+ if (host->mmc_host_ops.hs400_enhanced_strobe) {
+ arasan_phy_set(host, ENHSTRB_MODE, 1, 0x0, 0x0,
+ DLLTRM_ICP, clk);
+ } else {
+ switch (host->mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ arasan_phy_set(host, LEGACY_MODE, 0x0, 0x0, 0x0,
+ 0x0, 0x0);
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ arasan_phy_set(host, HISPD_MODE, 0x3, 0x0, 0x2,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ arasan_phy_set(host, HS200_MODE, 0x2,
+ host->mmc->ios.drv_type, 0x0,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ arasan_phy_set(host, DDR50_MODE, 0x1, 0x0,
+ 0x0, DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ arasan_phy_set(host, HS400_MODE, 0x1,
+ host->mmc->ios.drv_type, 0xa,
+ DLLTRM_ICP, clk);
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int arasan_pci_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err;
+
+ slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_8_BIT_DATA;
+ err = arasan_phy_init(slot->host);
+ if (err)
+ return -ENODEV;
+ return 0;
+}
+
+static void arasan_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ sdhci_set_clock(host, clock);
+
+ /* Change phy settings for the new clock */
+ arasan_select_phy_clock(host);
+}
+
+static const struct sdhci_ops arasan_sdhci_pci_ops = {
+ .set_clock = arasan_sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_arasan = {
+ .probe_slot = arasan_pci_probe_slot,
+ .ops = &arasan_sdhci_pci_ops,
+ .priv_size = sizeof(struct arasan_host),
+};
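A recurring building block in this new file is the bounded busy-wait: take a 100 µs ktime deadline, sample the expiry flag before the register read, and only give up after that final check, so a hit right at the deadline still counts. In generic form (foo_poll is illustrative, modelled on arasan_phy_addr_poll()):

static int foo_poll(struct sdhci_host *host, u32 reg, u32 busy_mask)
{
	ktime_t timeout = ktime_add_us(ktime_get(), 100);

	for (;;) {
		bool expired = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readw(host, reg) & busy_mask))
			return 0;	/* condition met */
		if (expired)
			return -EBUSY;	/* deadline passed after one last read */
	}
}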
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 3e4f04fd5175..6d1a983e6227 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -30,17 +30,37 @@
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#include "cqhci.h"
+
#include "sdhci.h"
#include "sdhci-pci.h"
-static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
#ifdef CONFIG_PM_SLEEP
-static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+{
+ mmc_pm_flag_t pm_flags = 0;
+ int i;
+
+ for (i = 0; i < chip->num_slots; i++) {
+ struct sdhci_pci_slot *slot = chip->slots[i];
+
+ if (slot)
+ pm_flags |= slot->host->mmc->pm_flags;
+ }
+
+ return device_set_wakeup_enable(&chip->pdev->dev,
+ (pm_flags & MMC_PM_KEEP_POWER) &&
+ (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+}
+
+static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
{
int i, ret;
+ sdhci_pci_init_wakeup(chip);
+
for (i = 0; i < chip->num_slots; i++) {
struct sdhci_pci_slot *slot = chip->slots[i];
struct sdhci_host *host;
@@ -56,9 +76,6 @@ static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
ret = sdhci_suspend_host(host);
if (ret)
goto err_pci_suspend;
-
- if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)
- sdhci_enable_irq_wakeups(host);
}
return 0;
@@ -69,52 +86,44 @@ err_pci_suspend:
return ret;
}
-static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
{
- mmc_pm_flag_t pm_flags = 0;
- int i;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
for (i = 0; i < chip->num_slots; i++) {
- struct sdhci_pci_slot *slot = chip->slots[i];
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
- if (slot)
- pm_flags |= slot->host->mmc->pm_flags;
+ ret = sdhci_resume_host(slot->host);
+ if (ret)
+ return ret;
}
- return device_init_wakeup(&chip->pdev->dev,
- (pm_flags & MMC_PM_KEEP_POWER) &&
- (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+ return 0;
}
-static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
{
int ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
if (ret)
return ret;
- sdhci_pci_init_wakeup(chip);
-
- return 0;
+ return sdhci_pci_suspend_host(chip);
}
-int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
{
- struct sdhci_pci_slot *slot;
- int i, ret;
-
- for (i = 0; i < chip->num_slots; i++) {
- slot = chip->slots[i];
- if (!slot)
- continue;
+ int ret;
- ret = sdhci_resume_host(slot->host);
- if (ret)
- return ret;
- }
+ ret = sdhci_pci_resume_host(chip);
+ if (ret)
+ return ret;
- return 0;
+ return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif
@@ -166,8 +175,48 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
return 0;
}
+
+static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
+ if (ret)
+ return ret;
+
+ return sdhci_pci_runtime_suspend_host(chip);
+}
+
+static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = sdhci_pci_runtime_resume_host(chip);
+ if (ret)
+ return ret;
+
+ return cqhci_resume(chip->slots[0]->host->mmc);
+}
#endif
+static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_pci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -583,6 +632,18 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
.voltage_switch = sdhci_intel_voltage_switch,
};
+static const struct sdhci_ops sdhci_intel_glk_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .voltage_switch = sdhci_intel_voltage_switch,
+ .irq = sdhci_cqhci_irq,
+};
+
static void byt_read_dsm(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
@@ -612,12 +673,80 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
+ }
+
+ return ret;
+}
+
+static void glk_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ /*
+ * CQE gets stuck if it sees Buffer Read Enable bit set, which can be
+ * the case after tuning, so ensure the buffer is drained.
+ */
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
}
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops glk_cqhci_ops = {
+ .enable = glk_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_pci_dumpregs,
+};
+
+static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
+{
+ struct device *dev = &slot->chip->pdev->dev;
+ struct sdhci_host *host = slot->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + 0x200;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->ops = &glk_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
return ret;
}
@@ -699,11 +828,20 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.allow_runtime_pm = true,
.probe_slot = glk_emmc_probe_slot,
+ .add_host = glk_emmc_add_host,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = sdhci_cqhci_suspend,
+ .resume = sdhci_cqhci_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = sdhci_cqhci_runtime_suspend,
+ .runtime_resume = sdhci_cqhci_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
- .ops = &sdhci_intel_byt_ops,
+ .ops = &sdhci_intel_glk_ops,
.priv_size = sizeof(struct intel_host),
};
@@ -778,6 +916,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
+ /* Advertise 2.0V for compatibility with the SDIO card's OCR */
+ slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_POWER_OFF_CARD;
break;
@@ -955,7 +1095,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i, ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = sdhci_pci_suspend_host(chip);
if (ret)
return ret;
@@ -965,8 +1105,6 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
- sdhci_pci_init_wakeup(chip);
-
return 0;
}
@@ -1306,6 +1444,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+ SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
@@ -1320,7 +1459,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
* *
\*****************************************************************************/
-static int sdhci_pci_enable_dma(struct sdhci_host *host)
+int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
@@ -1543,10 +1682,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
}
- host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+ if (device_can_wakeup(&pdev->dev))
+ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
slot->cd_override_level, 0, NULL);
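The wakeup rework applies one policy in both hunks above: MMC_PM_WAKE_SDIO_IRQ is only advertised when the PCI device can actually wake the system, and arming it is deferred to suspend time via device_set_wakeup_enable() instead of device_init_wakeup() at probe. Sketch of the capability side (foo_ name is illustrative):

static void foo_init_pm_caps(struct sdhci_pci_slot *slot, struct pci_dev *pdev)
{
	slot->host->mmc->pm_caps = MMC_PM_KEEP_POWER;

	/* only offer SDIO IRQ wake if the device can wake the system */
	if (device_can_wakeup(&pdev->dev))
		slot->host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
}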
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 0056f08a29cc..5cbcdc448f98 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,9 @@
#define PCI_SUBDEVICE_ID_NI_7884 0x7884
+#define PCI_VENDOR_ID_ARASAN 0x16e6
+#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670
+
/*
* PCI device class and mask
*/
@@ -170,11 +173,13 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
-
+int sdhci_pci_enable_dma(struct sdhci_host *host);
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
#endif
+extern const struct sdhci_pci_fixes sdhci_arasan;
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 8c0f88428556..14511526a3a8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -82,6 +82,10 @@ static int sdhci_probe(struct platform_device *pdev)
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto err_host;
+ }
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
sdhci = sdhci_priv(host);
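A note on the pattern above: platform_get_irq() returns a negative errno on failure and may in principle return 0, which is not a valid IRQ; the hunk collapses both cases to -EINVAL. A hedged alternative sketch that instead preserves the encoded errno (useful so -EPROBE_DEFER propagates), reusing the hunk's own ret and err_host:

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;	/* keep -EPROBE_DEFER and friends */
		goto err_host;
	}
	if (!host->irq) {
		ret = -EINVAL;		/* 0 is not a valid IRQ */
		goto err_host;
	}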
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 0842bbc2d7ad..4d0791f6ec23 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
+static void xenon_voltage_switch(struct sdhci_host *host)
+{
+ /* Wait 5ms after setting the 1.8V signal enable bit */
+ usleep_range(5000, 5500);
+}
+
static const struct sdhci_ops sdhci_xenon_ops = {
+ .voltage_switch = xenon_voltage_switch,
.set_clock = sdhci_set_clock,
.set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e9290a3439d5..070aff9c108f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1434,6 +1434,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
if (mode != MMC_POWER_OFF) {
switch (1 << vdd) {
case MMC_VDD_165_195:
+ /*
+ * Without a regulator, SDHCI does not support 2.0V,
+ * so we only get here if the driver deliberately
+ * added the 2.0V range to ocr_avail. Map it to 1.8V
+ * for the purpose of turning on the power.
+ */
+ case MMC_VDD_20_21:
pwr = SDHCI_POWER_180;
break;
case MMC_VDD_29_30:
@@ -2821,25 +2828,33 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
* sdhci_disable_irq_wakeups() since it will be set by
* sdhci_enable_card_detection() or sdhci_init().
*/
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
+ SDHCI_WAKE_ON_INT;
+ u32 irq_val = 0;
+ u8 wake_val = 0;
u8 val;
- u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
- | SDHCI_WAKE_ON_INT;
- u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
- SDHCI_INT_CARD_INT;
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= mask ;
- /* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
- val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
- irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+ wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
+ irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
}
+
+ wake_val |= SDHCI_WAKE_ON_INT;
+ irq_val |= SDHCI_INT_CARD_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ val |= wake_val;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
+
+ host->irq_wake_enabled = !enable_irq_wake(host->irq);
+
+ return host->irq_wake_enabled;
}
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
@@ -2850,6 +2865,10 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val &= ~mask;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
+ disable_irq_wake(host->irq);
+
+ host->irq_wake_enabled = false;
}
int sdhci_suspend_host(struct sdhci_host *host)
@@ -2858,15 +2877,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
mmc_retune_timer_stop(host->mmc);
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (!device_may_wakeup(mmc_dev(host->mmc)) ||
+ !sdhci_enable_irq_wakeups(host)) {
host->ier = 0;
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
- } else {
- sdhci_enable_irq_wakeups(host);
- enable_irq_wake(host->irq);
}
+
return 0;
}
@@ -2894,15 +2912,14 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (host->irq_wake_enabled) {
+ sdhci_disable_irq_wakeups(host);
+ } else {
ret = request_threaded_irq(host->irq, sdhci_irq,
sdhci_thread_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
- } else {
- sdhci_disable_irq_wakeups(host);
- disable_irq_wake(host->irq);
}
sdhci_enable_card_detection(host);
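The net effect of the sdhci.c rework: suspend records whether IRQ wakeup was actually armed (enable_irq_wake() can fail), and resume keys off that recorded state rather than re-evaluating device_may_wakeup(), which userspace may have toggled while the system slept. A condensed sketch of the resulting pairing, using only names from the hunks above:

	/* suspend: try to arm the IRQ as a wake source, else quiesce fully */
	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
	    !sdhci_enable_irq_wakeups(host)) {	/* sets host->irq_wake_enabled */
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		free_irq(host->irq, host);
	}

	/* resume: undo exactly what suspend did */
	if (host->irq_wake_enabled)
		sdhci_disable_irq_wakeups(host);	/* also disable_irq_wake() */
	else
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);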
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 54bc444c317f..afab26fd70e6 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -484,6 +484,7 @@ struct sdhci_host {
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
bool pending_reset; /* Cmd/data reset is pending */
+ bool irq_wake_enabled; /* IRQ wakeup is enabled */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -718,7 +719,6 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
#ifdef CONFIG_PM
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
-void sdhci_enable_irq_wakeups(struct sdhci_host *host);
int sdhci_runtime_suspend_host(struct sdhci_host *host);
int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 04ca0d33a521..485f7591fae4 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -10,9 +10,11 @@
* the Free Software Foundation, version 2 of the License.
*/
+#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
@@ -146,7 +148,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- sdhci_get_of_property(pdev);
host->hw_name = "f_sdh30";
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
@@ -158,25 +159,29 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
goto err;
}
- priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(priv->clk_iface)) {
- ret = PTR_ERR(priv->clk_iface);
- goto err;
- }
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
- ret = clk_prepare_enable(priv->clk_iface);
- if (ret)
- goto err;
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
- priv->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto err_clk;
- }
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_clk;
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
/* init vendor specific regs */
ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
@@ -226,16 +231,27 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id f_sdh30_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-sdhci-3.0" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id f_sdh30_acpi_ids[] = {
+ { "SCX0002" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids);
+#endif
static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
- .of_match_table = f_sdh30_dt_ids,
+ .of_match_table = of_match_ptr(f_sdh30_dt_ids),
+ .acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
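The f_sdh30 conversion follows the usual dual-enumeration recipe: DT-only resources (here, the clocks) are gated on dev_of_node(), while the match tables compile away cleanly when either firmware interface is disabled. A generic, self-contained sketch of the skeleton; all "example" names are hypothetical:

	#include <linux/acpi.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	#ifdef CONFIG_OF
	static const struct of_device_id example_dt_ids[] = {
		{ .compatible = "vendor,example" },
		{ /* sentinel */ }
	};
	#endif

	#ifdef CONFIG_ACPI
	static const struct acpi_device_id example_acpi_ids[] = {
		{ "EXPL0001" },
		{ /* sentinel */ }
	};
	#endif

	static int example_probe(struct platform_device *pdev)
	{
		/* Clocks are described only in DT; ACPI firmware manages them */
		if (dev_of_node(&pdev->dev)) {
			/* devm_clk_get() + clk_prepare_enable(), as above */
		}
		return 0;
	}

	static struct platform_driver example_driver = {
		.driver = {
			.name			= "example",
			.of_match_table		= of_match_ptr(example_dt_ids),
			.acpi_match_table	= ACPI_PTR(example_acpi_ids),
		},
		.probe = example_probe,
	};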
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 53fb18bb7bee..7bb00c68a756 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -916,7 +916,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
- u32 opc = cmd->opcode;
+ u32 opc;
u32 mask = 0;
unsigned long flags;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index cc98355dbdb9..bad612d6f879 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -3,7 +3,7 @@
* (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
* (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
* (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
- * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch>
+ * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
* (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
* (C) Copyright 2017 Sootech SA
*
@@ -1255,6 +1255,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
goto error_assert_reset;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto error_assert_reset;
+ }
+
return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
@@ -1393,5 +1398,5 @@ module_platform_driver(sunxi_mmc_driver);
MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>");
+MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>");
MODULE_ALIAS("platform:sunxi-mmc");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 64b7e9f18361..43a2ea5cff24 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -92,14 +92,19 @@ static int tmio_mmc_probe(struct platform_device *pdev)
pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
- host = tmio_mmc_host_alloc(pdev);
- if (!host)
+ host = tmio_mmc_host_alloc(pdev, pdata);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
goto cell_disable;
+ }
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res) >> 10;
- ret = tmio_mmc_host_probe(host, pdata, NULL);
+ host->mmc->f_max = pdata->hclk;
+ host->mmc->f_min = pdata->hclk / 512;
+
+ ret = tmio_mmc_host_probe(host);
if (ret)
goto host_free;
@@ -128,15 +133,11 @@ out:
static int tmio_mmc_remove(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
- if (mmc) {
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- tmio_mmc_host_remove(host);
- if (cell->disable)
- cell->disable(pdev);
- }
+ tmio_mmc_host_remove(host);
+ if (cell->disable)
+ cell->disable(pdev);
return 0;
}
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 3e6ff8921440..e7d651352dc9 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -112,12 +112,6 @@
struct tmio_mmc_data;
struct tmio_mmc_host;
-struct tmio_mmc_dma {
- enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
- void (*enable)(struct tmio_mmc_host *host, bool enable);
-};
-
struct tmio_mmc_dma_ops {
void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
void (*enable)(struct tmio_mmc_host *host, bool enable);
@@ -134,6 +128,7 @@ struct tmio_mmc_host {
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
+ struct mmc_host_ops ops;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
@@ -144,18 +139,15 @@ struct tmio_mmc_host {
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
- unsigned long bus_shift;
+ unsigned int bus_shift;
struct platform_device *pdev;
struct tmio_mmc_data *pdata;
- struct tmio_mmc_dma *dma;
/* DMA support */
bool force_pio;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
- struct completion dma_dataend;
- struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
@@ -174,7 +166,6 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
- u32 scc_tappos;
/* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
@@ -185,9 +176,6 @@ struct tmio_mmc_host {
void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
- int (*card_busy)(struct mmc_host *mmc);
- int (*start_signal_voltage_switch)(struct mmc_host *mmc,
- struct mmc_ios *ios);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*hw_reset)(struct tmio_mmc_host *host);
void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
@@ -207,11 +195,10 @@ struct tmio_mmc_host {
const struct tmio_mmc_dma_ops *dma_ops;
};
-struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata);
void tmio_mmc_host_free(struct tmio_mmc_host *host);
-int tmio_mmc_host_probe(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops);
+int tmio_mmc_host_probe(struct tmio_mmc_host *host);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
@@ -240,26 +227,26 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift));
+ return ioread16(host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
int addr)
{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+ return ioread16(host->ctl + (addr << host->bus_shift)) |
+ ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
- readsl(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
@@ -270,26 +257,26 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
*/
if (host->write16_hook && host->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->bus_shift));
+ iowrite16(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
int addr, u32 val)
{
- writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ iowrite16(val & 0xffff, host->ctl + (addr << host->bus_shift));
+ iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
- writesl(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
#endif
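Why readw() becomes ioread16(): the ioread*() family accepts __iomem cookies produced by both ioremap() and ioport_map(), so these accessors keep working on platforms where the TMIO registers sit behind port I/O rather than plain MMIO, whereas readw() is defined only for memory-mapped addresses. The _rep variants also drain a FIFO in one call; a sketch, with CTL_SD_DATA_PORT assumed from <linux/mfd/tmio.h>:

	/* read 'count' 16-bit words back-to-back from one register offset */
	ioread16_rep(host->ctl + (CTL_SD_DATA_PORT << host->bus_shift),
		     buf, count);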
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 583bf3262df5..33494241245a 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -806,7 +806,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret == 0)
set_bit(i, host->taps);
- mdelay(1);
+ usleep_range(1000, 1200);
}
ret = host->select_tuning(host);
@@ -926,20 +926,6 @@ static void tmio_mmc_done_work(struct work_struct *work)
tmio_mmc_finish_request(host);
}
-static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
-{
- if (!host->clk_enable)
- return -ENOTSUPP;
-
- return host->clk_enable(host);
-}
-
-static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
-{
- if (host->clk_disable)
- host->clk_disable(host);
-}
-
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -958,7 +944,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* 100us were not enough. Is this the same 140us delay, as in
* tmio_mmc_set_ios()?
*/
- udelay(200);
+ usleep_range(200, 300);
}
/*
* It seems, VccQ should be switched on after Vcc, this is also what the
@@ -966,7 +952,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
ret = regulator_enable(mmc->supply.vqmmc);
- udelay(200);
+ usleep_range(200, 300);
}
if (ret < 0)
@@ -1059,7 +1045,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* Let things settle. delay taken from winCE driver */
- udelay(140);
+ usleep_range(140, 200);
if (PTR_ERR(host->mrq) == -EINTR)
dev_dbg(&host->pdev->dev,
"%s.%d: IOS interrupted: clk %u, mode %u",
@@ -1076,15 +1062,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_ro(mmc);
-
- if (ret >= 0)
- return ret;
-
- ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
- return ret;
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_multi_io_quirk(struct mmc_card *card,
@@ -1098,7 +1078,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -1145,19 +1125,45 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
-struct tmio_mmc_host*
-tmio_mmc_host_alloc(struct platform_device *pdev)
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
+ struct resource *res;
+ void __iomem *ctl;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctl = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctl))
+ return ERR_CAST(ctl);
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
if (!mmc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
host = mmc_priv(mmc);
+ host->ctl = ctl;
host->mmc = mmc;
host->pdev = pdev;
+ host->pdata = pdata;
+ host->ops = tmio_mmc_ops;
+ mmc->ops = &host->ops;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+ host = ERR_PTR(ret);
+ goto free;
+ }
+
+ tmio_mmc_of_parse(pdev, pdata);
+
+ platform_set_drvdata(pdev, host);
+
+ return host;
+free:
+ mmc_free_host(mmc);
return host;
}
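The allocation rework also gives each host its own copy of the ops table (host->ops = tmio_mmc_ops), which is what lets the shared template become const later in this patch while individual instances still override callbacks. The copy-then-override pattern in isolation, using names from these hunks:

	host->ops = tmio_mmc_ops;	/* struct copy of the const template */
	mmc->ops = &host->ops;		/* the mmc core calls this host's copy */

	/* later, a per-instance override that never touches the template: */
	if (mmc_can_gpio_ro(mmc))
		host->ops.get_ro = mmc_gpio_get_ro;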
@@ -1169,32 +1175,24 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops)
+int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
+ struct tmio_mmc_data *pdata = _host->pdata;
struct mmc_host *mmc = _host->mmc;
- struct resource *res_ctl;
int ret;
u32 irq_mask = TMIO_MASK_CMD;
- tmio_mmc_of_parse(pdev, pdata);
+ /*
+ * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * looping forever...
+ */
+ if (mmc->f_min == 0)
+ return -EINVAL;
if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
_host->write16_hook = NULL;
- res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_ctl)
- return -EINVAL;
-
- ret = mmc_of_parse(mmc);
- if (ret < 0)
- return ret;
-
- _host->pdata = pdata;
- platform_set_drvdata(pdev, mmc);
-
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
@@ -1202,15 +1200,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (ret < 0)
return ret;
- _host->ctl = devm_ioremap(&pdev->dev,
- res_ctl->start, resource_size(res_ctl));
- if (!_host->ctl)
- return -ENOMEM;
-
- tmio_mmc_ops.card_busy = _host->card_busy;
- tmio_mmc_ops.start_signal_voltage_switch =
- _host->start_signal_voltage_switch;
- mmc->ops = &tmio_mmc_ops;
+ if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
+ if (ret)
+ return ret;
+ }
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
@@ -1233,7 +1227,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
}
mmc->max_seg_size = mmc->max_req_size;
- _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
+ if (mmc_can_gpio_ro(mmc))
+ _host->ops.get_ro = mmc_gpio_get_ro;
+
+ _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
@@ -1246,18 +1243,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (pdata->flags & TMIO_MMC_MIN_RCAR2)
_host->native_hotplug = true;
- if (tmio_mmc_clk_enable(_host) < 0) {
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
- }
-
- /*
- * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
- * looping forever...
- */
- if (mmc->f_min == 0)
- return -EINVAL;
-
/*
* While using internal tmio hardware logic for card detection, we need
* to ensure it stays powered for it to work.
@@ -1293,7 +1278,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
- _host->dma_ops = dma_ops;
tmio_mmc_request_dma(_host, pdata);
pm_runtime_set_active(&pdev->dev);
@@ -1307,14 +1291,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
- if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret)
- goto remove_host;
-
- mmc_gpiod_request_cd_irq(mmc);
- }
-
return 0;
remove_host:
@@ -1343,16 +1319,27 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
+{
+ if (!host->clk_enable)
+ return -ENOTSUPP;
+
+ return host->clk_enable(host);
+}
+
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+ if (host->clk_disable)
+ host->clk_disable(host);
+}
+
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
@@ -1372,8 +1359,7 @@ static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
int tmio_mmc_host_runtime_resume(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_reset(host);
tmio_mmc_clk_enable(host);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 0806f72102c0..a85af236b44d 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -904,9 +904,6 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
- if (from + len > mtd->size)
- return -EINVAL;
-
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
@@ -990,36 +987,6 @@ err_in_read:
goto out;
}
-/**
- * doc_read - Read bytes from flash
- * @mtd: the device
- * @from: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
- *
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
- *
- * Returns 0 if read successful, of -EIO, -EINVAL if an error occurred
- */
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_oob_ops ops;
- size_t ret;
-
- memset(&ops, 0, sizeof(ops));
- ops.datbuf = buf;
- ops.len = len;
- ops.mode = MTD_OPS_AUTO_OOB;
-
- ret = doc_read_oob(mtd, from, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static int doc_reload_bbt(struct docg3 *docg3)
{
int block = DOC_LAYOUT_BLOCK_BBT;
@@ -1471,8 +1438,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
- if (ofs + len > mtd->size)
- return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
@@ -1513,39 +1478,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
return ret;
}
-/**
- * doc_write - Write a buffer to the chip
- * @mtd: the device
- * @to: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to write (must be a full page size, ie. 512)
- * @retlen: the number of bytes actually written (0 or 512)
- * @buf: the buffer to get bytes from
- *
- * Writes data to the chip.
- *
- * Returns 0 if write successful, -EIO if write error
- */
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct docg3 *docg3 = mtd->priv;
- int ret;
- struct mtd_oob_ops ops;
-
- doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
- ops.datbuf = (char *)buf;
- ops.len = len;
- ops.mode = MTD_OPS_PLACE_OOB;
- ops.oobbuf = NULL;
- ops.ooblen = 0;
- ops.ooboffs = 0;
-
- ret = doc_write_oob(mtd, to, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static struct docg3 *sysfs_dev2docg3(struct device *dev,
struct device_attribute *attr)
{
@@ -1866,8 +1798,6 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
mtd->_read_oob = doc_read_oob;
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index dbe6a1de2bb8..a4e18f6aaa33 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -307,10 +307,18 @@ static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = spi_get_drvdata(spi);
+ spi_nor_restore(&flash->spi_nor);
+
/* Clean up MTD stuff. */
return mtd_device_unregister(&flash->spi_nor.mtd);
}
+static void m25p_shutdown(struct spi_device *spi)
+{
+ struct m25p *flash = spi_get_drvdata(spi);
+
+ spi_nor_restore(&flash->spi_nor);
+}
/*
* Do NOT add to this array without reading the following:
*
@@ -386,6 +394,7 @@ static struct spi_driver m25p80_driver = {
.id_table = m25p_ids,
.probe = m25p_probe,
.remove = m25p_remove,
+ .shutdown = m25p_shutdown,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 8956b7dcc984..75f71d166fd6 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -68,6 +68,7 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -84,12 +85,16 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
@@ -100,6 +105,7 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -117,12 +123,16 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
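Two fixes ride together in the mchp23k256 hunks: spi_sync() can fail and its return value is now propagated, and the mutex is released before the early return so the error path cannot leak the lock. The shape of the pattern:

	mutex_lock(&flash->lock);
	ret = spi_sync(flash->spi, &message);
	mutex_unlock(&flash->lock);	/* release before any early return */
	if (ret)
		return ret;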
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 73b605577447..28553c840d32 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -503,6 +503,11 @@ int add_mtd_device(struct mtd_info *mtd)
return -EEXIST;
BUG_ON(mtd->writesize == 0);
+
+ if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ !(mtd->flags & MTD_NO_ERASE)))
+ return -EINVAL;
+
mutex_lock(&mtd_table_mutex);
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -1053,7 +1058,20 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
* representing the maximum number of bitflips that were corrected on
* any one ecc region (if applicable; zero otherwise).
*/
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (mtd->_read) {
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ } else if (mtd->_read_oob) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+
+ ret_code = mtd->_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ } else {
+ return -ENOTSUPP;
+ }
+
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1068,11 +1086,25 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
*retlen = 0;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
- if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ if ((!mtd->_write && !mtd->_write_oob) ||
+ !(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!len)
return 0;
ledtrig_mtd_activity();
+
+ if (!mtd->_write) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = mtd->_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+ }
+
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
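With this fallback in place, a driver that implements only ->_read_oob()/->_write_oob() (such as docg3 above, whose private doc_read()/doc_write() wrappers are deleted) gets data-only I/O for free: the core builds a stack mtd_oob_ops with no OOB buffer and routes through the OOB path. The equivalent call a driver no longer needs to open-code:

	struct mtd_oob_ops ops = {
		.len	= len,
		.datbuf	= buf,	/* .oobbuf stays NULL: no OOB transfer */
	};

	ret = mtd->_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;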
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index be088bccd593..76cd21d1171b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -105,34 +105,17 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = mtd_to_part(mtd);
+ struct mtd_ecc_stats stats;
int res;
- if (from >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && from + ops->len > mtd->size)
- return -EINVAL;
-
- /*
- * If OOB is also requested, make sure that we do not read past the end
- * of this partition.
- */
- if (ops->oobbuf) {
- size_t len, pages;
-
- len = mtd_oobavail(mtd, ops);
- pages = mtd_div_by_ws(mtd->size, mtd);
- pages -= mtd_div_by_ws(from, mtd);
- if (ops->ooboffs + ops->ooblen > pages * len)
- return -EINVAL;
- }
-
+ stats = part->parent->ecc_stats;
res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(res)) {
- if (mtd_is_bitflip(res))
- mtd->ecc_stats.corrected++;
- if (mtd_is_eccerr(res))
- mtd->ecc_stats.failed++;
- }
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->parent->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->parent->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -189,10 +172,6 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
{
struct mtd_part *part = mtd_to_part(mtd);
- if (to >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && to + ops->len > mtd->size)
- return -EINVAL;
return part->parent->_write_oob(part->parent, to + part->offset, ops);
}
@@ -435,8 +414,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
parent->dev.parent;
slave->mtd.dev.of_node = part->of_node;
- slave->mtd._read = part_read;
- slave->mtd._write = part_write;
+ if (parent->_read)
+ slave->mtd._read = part_read;
+ if (parent->_write)
+ slave->mtd._write = part_write;
if (parent->_panic_write)
slave->mtd._panic_write = part_panic_write;
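The part_read_oob() change fixes an accounting subtlety: the return code reports at most one bitflip/ECC event even when a multi-page OOB read corrected several, so bumping the partition's counters by one per call undercounts. Snapshotting the parent's counters before the call and adding the delta afterwards is exact. The pattern in isolation:

	struct mtd_ecc_stats stats = part->parent->ecc_stats;	/* snapshot */

	res = part->parent->_read_oob(part->parent, from + part->offset, ops);

	/* attribute only this call's delta to the partition's counters */
	if (mtd_is_eccerr(res))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;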
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f07492c6f4b2..7eb0e1f4f980 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1223,8 +1223,9 @@ static int mtdswap_show(struct seq_file *s, void *data)
unsigned int max[MTDSWAP_TREE_CNT];
unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
uint64_t use_size;
- char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
- "failing"};
+ static const char * const name[] = {
+ "clean", "used", "low", "high", "dirty", "bitflip", "failing"
+ };
mutex_lock(&d->mbd_dev->lock);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index bb48aafed9a2..e6b8c59f2c0d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -315,6 +315,7 @@ config MTD_NAND_ATMEL
config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
+ depends on !MTD_NAND_MARVELL
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
@@ -323,6 +324,18 @@ config MTD_NAND_PXA3xx
platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
platforms (7K, 8K) (NFCv2).
+config MTD_NAND_MARVELL
+ tristate "NAND controller support on Marvell boards"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
+ COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller driver for Marvell boards,
+ including:
+ - PXA3xx processors (NFCv1)
+ - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
+ - 64-bit Armada platforms (7K, 8K) (NFCv2)
+
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
depends on ARCH_LPC32XX
@@ -376,9 +389,7 @@ config MTD_NAND_GPMI_NAND
Enables NAND Flash support for IMX23, IMX28 or IMX6.
The GPMI controller is very powerful, with the help of BCH
module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time. The GPMI may conflicts with other
- block, such as SD card. So pay attention to it when you enable
- the GPMI.
+ NAND flashes at the same time.
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 118a1349aad3..921634ba400c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 90a71a56bc23..b2f00b398490 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -841,6 +841,8 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
struct atmel_nand *nand = to_atmel_nand(chip);
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
if (ret)
return ret;
@@ -857,7 +859,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -881,6 +883,8 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
if (ret)
return ret;
@@ -1000,7 +1004,7 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
* to the non-optimized one.
*/
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
raw);
@@ -1178,7 +1182,6 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
return 0;
}
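The atmel conversion (and the bf5xx, brcmnand, cafe, denali, docg4 and fsl ones below) replaces hand-rolled chip->cmdfunc() sequencing with the new nand_*_op() helpers, which bracket the data transfer with the correct command/address cycles and status handling. The canonical program-page shape:

	nand_prog_page_begin_op(chip, page, 0, NULL, 0); /* SEQIN + address */
	/* ... push out data and ECC bytes ... */
	return nand_prog_page_end_op(chip);              /* PAGEPROG + status */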
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 5655dca6ce43..87bbd177b3e5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -572,6 +572,8 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -582,10 +584,10 @@ static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/*
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index dd56a671ea42..c28fd2bc1a84 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1071,7 +1071,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(chip, NULL);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
ret = bcmnand_ctrl_poll_status(ctrl,
NAND_CTRL_RDY |
@@ -1453,7 +1453,7 @@ static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
/* At FC_BYTES boundary, switch to next column */
if (host->last_byte > 0 && offs == 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
ret = ctrl->flash_cache[offs];
break;
@@ -1681,7 +1681,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
int ret;
if (!buf) {
- buf = chip->buffers->databuf;
+ buf = chip->data_buf;
/* Invalidate page cache */
chip->pagebuf = -1;
}
@@ -1689,7 +1689,6 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
sas = mtd->oobsize / chip->ecc.steps;
/* read without ecc for verification */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
if (ret)
return ret;
@@ -1793,6 +1792,8 @@ static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
@@ -1804,6 +1805,8 @@ static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
@@ -1909,8 +1912,10 @@ static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_page_raw(struct mtd_info *mtd,
@@ -1920,10 +1925,12 @@ static int brcmnand_write_page_raw(struct mtd_info *mtd,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -2193,16 +2200,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
if (ctrl->nand_version >= 0x0702)
tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
- if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
- /*
- * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
- * errors
- */
- if (has_flash_dma(ctrl))
- tmp &= ~ACC_CONTROL_PREFETCH;
- else
- tmp |= ACC_CONTROL_PREFETCH;
- }
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
nand_writereg(ctrl, offs, tmp);
return 0;
@@ -2230,6 +2230,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_set_controller_data(chip, host);
mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
@@ -2369,12 +2372,11 @@ static int brcmnand_resume(struct device *dev)
list_for_each_entry(host, &ctrl->host_list, node) {
struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(chip);
}
return 0;
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index bc558c438a57..567ff972d5fc 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -353,23 +353,15 @@ static void cafe_nand_bug(struct mtd_info *mtd)
static int cafe_nand_write_oob(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status = 0;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
@@ -391,7 +383,7 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
cafe_readl(cafe, NAND_ECC_RESULT),
cafe_readl(cafe, NAND_ECC_SYN01));
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
@@ -549,13 +541,13 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -613,7 +605,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
uint32_t ctrl;
int err = 0;
int old_dma;
- struct nand_buffers *nbuf;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -661,7 +652,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -731,32 +721,20 @@ static int cafe_nand_probe(struct pci_dev *pdev,
if (err)
goto out_irq;
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- &cafe->dmaaddr, GFP_KERNEL);
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
if (!cafe->dmabuf) {
err = -ENOMEM;
goto out_irq;
}
- cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
/* Set up DMA address */
- cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
- if (sizeof(cafe->dmaaddr) > 4)
- /* Shift in two parts to shut the compiler up */
- cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
- else
- cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
- /* this driver does not need the @ecccalc and @ecccode */
- nbuf->ecccalc = NULL;
- nbuf->ecccode = NULL;
- nbuf->databuf = (uint8_t *)(nbuf + 1);
-
/* Restore the DMA flag */
usedma = old_dma;
@@ -801,10 +779,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
goto out;
out_free_dma:
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
@@ -829,10 +804,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
nand_release(mtd);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
}
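The cafe_nand DMA-address hunk swaps the sizeof()-guarded shifting for the kernel.h helpers, which are safe even when dma_addr_t is 32 bits wide: splitting the shift into two 16-bit halves avoids the undefined shift-by-32. Paraphrasing their definitions from <linux/kernel.h>:

	#define lower_32_bits(n) ((u32)(n))
	#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) /* defined for 32-bit n too */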
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 5124f8ae8c04..313c7f50621b 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -330,16 +330,12 @@ static int denali_check_erased_page(struct mtd_info *mtd,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
- uint8_t *ecc_code = chip->buffers->ecccode;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- int i, ret, stat;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
+ int i, stat;
for (i = 0; i < ecc_steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
@@ -645,8 +641,6 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int page, int write)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
- unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
uint8_t *bufpoi = chip->oob_poi;
@@ -658,11 +652,11 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int i, pos, len;
/* BBM at the beginning of the OOB area */
- chip->cmdfunc(mtd, start_cmd, writesize, page);
if (write)
- chip->write_buf(mtd, bufpoi, oob_skip);
+ nand_prog_page_begin_op(chip, page, writesize, bufpoi,
+ oob_skip);
else
- chip->read_buf(mtd, bufpoi, oob_skip);
+ nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
bufpoi += oob_skip;
/* OOB ECC */
@@ -675,30 +669,35 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- chip->cmdfunc(mtd, rnd_cmd, pos, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, pos, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, pos, bufpoi, len,
+ false);
bufpoi += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
bufpoi += len;
}
}
/* OOB free */
len = oobsize - (bufpoi - chip->oob_poi);
- chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, size - len, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, size - len, bufpoi, len,
+ false);
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -710,12 +709,12 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int ret, i, pos, len;
- ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
+ ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
if (ret)
return ret;
@@ -730,11 +729,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(buf, dma_buf + pos, len);
+ memcpy(buf, tmp_buf + pos, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(buf, dma_buf + writesize + oob_skip,
+ memcpy(buf, tmp_buf + writesize + oob_skip,
len);
buf += len;
}
@@ -745,7 +744,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(oob, dma_buf + writesize, oob_skip);
+ memcpy(oob, tmp_buf + writesize, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -758,11 +757,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(oob, dma_buf + pos, len);
+ memcpy(oob, tmp_buf + pos, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(oob, dma_buf + writesize + oob_skip,
+ memcpy(oob, tmp_buf + writesize + oob_skip,
len);
oob += len;
}
@@ -770,7 +769,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(oob, dma_buf + size - len, len);
+ memcpy(oob, tmp_buf + size - len, len);
}
return 0;
@@ -788,16 +787,12 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- int status;
denali_reset_irq(denali);
denali_oob_xfer(mtd, chip, page, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -841,7 +836,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int i, pos, len;
@@ -851,7 +846,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* This simplifies the logic.
*/
if (!buf || !oob_required)
- memset(dma_buf, 0xff, size);
+ memset(tmp_buf, 0xff, size);
/* Arrange the buffer for syndrome payload/ecc layout */
if (buf) {
@@ -864,11 +859,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, buf, len);
+ memcpy(tmp_buf + pos, buf, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(dma_buf + writesize + oob_skip, buf,
+ memcpy(tmp_buf + writesize + oob_skip, buf,
len);
buf += len;
}
@@ -879,7 +874,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(dma_buf + writesize, oob, oob_skip);
+ memcpy(tmp_buf + writesize, oob, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -892,11 +887,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, oob, len);
+ memcpy(tmp_buf + pos, oob, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(dma_buf + writesize + oob_skip, oob,
+ memcpy(tmp_buf + writesize + oob_skip, oob,
len);
oob += len;
}
@@ -904,10 +899,10 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(dma_buf + size - len, oob, len);
+ memcpy(tmp_buf + size - len, oob, len);
}
- return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
+ return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -951,7 +946,7 @@ static int denali_erase(struct mtd_info *mtd, int page)
irq_status = denali_wait_for_irq(denali,
INTR__ERASE_COMP | INTR__ERASE_FAIL);
- return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
+ return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
@@ -1359,7 +1354,6 @@ int denali_init(struct denali_nand_info *denali)
chip->read_buf = denali_read_buf;
chip->write_buf = denali_write_buf;
}
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.read_page = denali_read_page;
chip->ecc.read_page_raw = denali_read_page_raw;
chip->ecc.write_page = denali_write_page;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 2911066dacac..9ad33d237378 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -329,7 +329,7 @@ struct denali_nand_info {
#define DENALI_CAP_DMA_64BIT BIT(1)
int denali_calc_ecc_bytes(int step_size, int strength);
-extern int denali_init(struct denali_nand_info *denali);
-extern void denali_remove(struct denali_nand_info *denali);
+int denali_init(struct denali_nand_info *denali);
+void denali_remove(struct denali_nand_info *denali);
#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 57fb7ae31412..49cb3e1f8bd0 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -125,3 +125,7 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
module_pci_driver(denali_pci_driver);
+
+MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 72671dc52e2e..6bc93ea66f50 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -448,7 +448,7 @@ static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
int status;
DoC_WaitReady(doc);
- this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(this, NULL);
DoC_WaitReady(doc);
status = (int)this->read_byte(mtd);
@@ -595,7 +595,7 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
/* Assert ChipEnable and deassert WriteProtect */
WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(this);
doc->curchip = chip;
doc->curfloor = floor;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 2436cbc71662..72f1327c4430 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -785,6 +785,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_READ_MODE |
DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
@@ -864,7 +866,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
- docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+ nand_read_page_op(nand, page, nand->ecc.size, NULL, 0);
writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
write_nop(docptr);
@@ -900,6 +902,7 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
uint16_t g4_page;
+ int status;
dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
@@ -939,11 +942,15 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
poll_status(doc);
write_nop(docptr);
- return nand->waitfunc(mtd, nand);
+ status = nand->waitfunc(mtd, nand);
+ if (status < 0)
+ return status;
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, bool use_ecc)
+ const uint8_t *buf, int page, bool use_ecc)
{
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
@@ -951,6 +958,8 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s...\n", __func__);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
DOCG4_BCH_SIZE,
@@ -995,19 +1004,19 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
- return 0;
+ return nand_prog_page_end_op(nand);
}
static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, false);
+ return write_page(mtd, nand, buf, page, false);
}
static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, true);
+ return write_page(mtd, nand, buf, page, true);
}
static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 17db2f90aa2c..8b6dcd739ecb 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -713,7 +713,7 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
- fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -729,10 +729,10 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/* ECC will be calculated automatically, and errors will be detected in
@@ -742,10 +742,10 @@ static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, uint32_t data_len,
const uint8_t *buf, int oob_required, int page)
{
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9e03bac7f34c..4872a7ba6503 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -688,7 +688,7 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -711,10 +711,10 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -916,6 +916,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
+ /*
+ * IFC version 2.0.0 has 16KB of internal SRAM, as compared to the older
+ * versions which had 8KB; hence the bufnum mask needs to be updated.
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
return 0;
}
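
For illustration (not part of the patch): the doubling arithmetic above preserves the all-ones shape of the mask, so a hypothetical pre-2.0.0 bufnum_mask of 0x3 (4 buffers) becomes 0x7 (8 buffers). A standalone sketch with assumed values:

#include <assert.h>

int main(void)
{
	unsigned int bufnum_mask = 0x3;	/* assumed IFC < 2.0.0 mask */

	/* Doubling the SRAM doubles the buffer count; (mask * 2) + 1
	 * keeps the mask contiguous (0x3 -> 0x7). */
	bufnum_mask = (bufnum_mask * 2) + 1;
	assert(bufnum_mask == 0x7);
	return 0;
}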
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index eac15d9bf49e..f49ed46fa770 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -684,8 +684,8 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int off, len, group = 0;
/*
* ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
@@ -697,7 +697,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
chip->read_buf(mtd, p, eccsize);
@@ -720,8 +720,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->options & NAND_BUSWIDTH_16)
len = roundup(len, 2);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
- chip->read_buf(mtd, oob + j, len);
+ nand_read_oob_op(chip, page, off, oob + j, len);
j += len;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d4d824ef64e9..61fdd733492f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,11 +1029,13 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int gpmi_ecc_read_page_data(struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(chip);
void *payload_virt;
dma_addr_t payload_phys;
void *auxiliary_virt;
@@ -1094,8 +1096,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
offset /= 8;
eccbytes -= offset;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- chip->read_buf(mtd, eccbuf, eccbytes);
+ nand_change_read_column_op(chip, offset, eccbuf,
+ eccbytes, false);
/*
* ECC data are not byte aligned and we may have
@@ -1176,6 +1178,14 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
+}
+
/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf, int page)
@@ -1220,12 +1230,12 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
meta = geo->metadata_size;
if (first) {
col = meta + (size + ecc_parity_size) * first;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
-
meta = 0;
buf = buf + first * size;
}
+ nand_read_page_op(chip, page, col, NULL, 0);
+
/* Save the old environment */
r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
@@ -1254,7 +1264,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
/* Read the subpage now */
this->swap_block_mark = false;
- max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
/* Restore */
writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
@@ -1277,6 +1287,9 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
dev_dbg(this->dev, "ecc write page.\n");
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (this->swap_block_mark) {
/*
* If control arrives here, we're doing block mark swapping.
@@ -1338,7 +1351,10 @@ exit_auxiliary:
payload_virt, payload_phys);
}
- return 0;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -1411,7 +1427,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
memset(chip->oob_poi, ~0, mtd->oobsize);
/* Read out the conventional OOB. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
@@ -1421,7 +1437,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
*/
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
chip->oob_poi[0] = chip->read_byte(mtd);
}
@@ -1432,7 +1448,6 @@ static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
struct mtd_oob_region of = { };
- int status = 0;
/* Do we have available oob area? */
mtd_ooblayout_free(mtd, 0, &of);
@@ -1442,12 +1457,8 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
if (!nand_is_slc(chip))
return -EPERM;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
- chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
+ chip->oob_poi + of.offset, of.length);
}
/*
@@ -1477,8 +1488,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
int step;
- chip->read_buf(mtd, tmp_buf,
- mtd->writesize + mtd->oobsize);
+ nand_read_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
/*
* If required, swap the bad block marker and the data stored in the
@@ -1487,12 +1498,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
/*
* Copy the metadata section into the oob buffer (this section is
@@ -1615,31 +1622,22 @@ static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
- chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
-
- return 0;
+ return nand_prog_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
}
static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
-
return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1649,7 +1647,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret = 0;
uint8_t *block_mark;
- int column, page, status, chipnr;
+ int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1663,13 +1661,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* Shift to get page */
page = (int)(ofs >> chip->page_shift);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
- chip->write_buf(mtd, block_mark, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- ret = -EIO;
+ ret = nand_prog_page_op(chip, page, column, block_mark, 1);
chip->select_chip(mtd, -1);
@@ -1712,7 +1704,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1737,7 +1729,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
* Read the NCB fingerprint. The fingerprint is four bytes long
* and starts in the 12th byte of the page.
*/
- chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ nand_read_page_op(chip, page, 12, NULL, 0);
chip->read_buf(mtd, buffer, strlen(fingerprint));
/* Look for the fingerprint. */
@@ -1771,7 +1763,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int status;
@@ -1797,17 +1789,10 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
dev_dbg(dev, "Erasing the search area...\n");
for (block = 0; block < search_area_size_in_blocks; block++) {
- /* Compute the page address. */
- page = block * block_size_in_pages;
-
/* Erase this block. */
dev_dbg(dev, "\tErasing block 0x%x\n", block);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-
- /* Wait for the erase to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = nand_erase_op(chip, block);
+ if (status)
dev_err(dev, "[%s] Erase failed.\n", __func__);
}
@@ -1823,13 +1808,9 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- /* Wait for the write to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
+ if (status)
dev_err(dev, "[%s] Write failed.\n", __func__);
}
@@ -1884,7 +1865,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
/* Send the command to read the conventional block mark. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
block_mark = chip->read_byte(mtd);
chip->select_chip(mtd, -1);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a45e4ce13d10..06c1f993912c 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -268,31 +268,31 @@ struct timing_threshold {
};
/* Common Services */
-extern int common_nfc_set_geometry(struct gpmi_nand_data *);
-extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-extern void prepare_data_dma(struct gpmi_nand_data *,
- enum dma_data_direction dr);
-extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
-extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
+int common_nfc_set_geometry(struct gpmi_nand_data *);
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
/* GPMI-NAND helper function library */
-extern int gpmi_init(struct gpmi_nand_data *);
-extern int gpmi_extra_init(struct gpmi_nand_data *);
-extern void gpmi_clear_bch(struct gpmi_nand_data *);
-extern void gpmi_dump_info(struct gpmi_nand_data *);
-extern int bch_set_geometry(struct gpmi_nand_data *);
-extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
-extern int gpmi_send_command(struct gpmi_nand_data *);
-extern void gpmi_begin(struct gpmi_nand_data *);
-extern void gpmi_end(struct gpmi_nand_data *);
-extern int gpmi_read_data(struct gpmi_nand_data *);
-extern int gpmi_send_data(struct gpmi_nand_data *);
-extern int gpmi_send_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
-extern int gpmi_read_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_init(struct gpmi_nand_data *);
+int gpmi_extra_init(struct gpmi_nand_data *);
+void gpmi_clear_bch(struct gpmi_nand_data *);
+void gpmi_dump_info(struct gpmi_nand_data *);
+int bch_set_geometry(struct gpmi_nand_data *);
+int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+int gpmi_send_command(struct gpmi_nand_data *);
+void gpmi_begin(struct gpmi_nand_data *);
+void gpmi_end(struct gpmi_nand_data *);
+int gpmi_read_data(struct gpmi_nand_data *);
+int gpmi_send_data(struct gpmi_nand_data *);
+int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
const u8 *src, size_t src_bit_off,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 0897261c3e17..cb862793ab6d 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -544,7 +544,7 @@ static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
int stat_1, stat_2;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/* errors which can not be corrected by ECC */
@@ -574,8 +574,7 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
{
struct hinfc_host *host = nand_get_controller_data(chip);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (host->irq_status & HINFC504_INTS_UE) {
host->irq_status = 0;
@@ -590,11 +589,11 @@ static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void hisi_nfc_host_init(struct hinfc_host *host)
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ad827d4af3e9..613b00a9604b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -313,6 +313,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 id[2];
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
@@ -335,17 +336,16 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
/* Retrieve the IDs from the first chip. */
chip->select_chip(mtd, 0);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- *nand_maf_id = chip->read_byte(mtd);
- *nand_dev_id = chip->read_byte(mtd);
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ *nand_maf_id = id[0];
+ *nand_dev_id = id[1];
} else {
/* Detect additional chip. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- if (*nand_maf_id != chip->read_byte(mtd)
- || *nand_dev_id != chip->read_byte(mtd)) {
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
ret = -ENODEV;
goto notfound_id;
}
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5796468db653..e357948a7505 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -461,7 +461,7 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* Writing Command and Address */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* For all sub-pages */
for (i = 0; i < host->mlcsubpages; i++) {
@@ -522,6 +522,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
memcpy(dma_buf, buf, mtd->writesize);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
for (i = 0; i < host->mlcsubpages; i++) {
/* Start Encode */
writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
@@ -550,7 +552,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
/* Wait for Controller Ready */
lpc32xx_waitfunc_controller(mtd, chip);
}
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index b61f28a1554d..5f7cc6da0a7f 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -399,10 +399,7 @@ static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int
static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -411,17 +408,8 @@ static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/*
@@ -632,7 +620,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Read data and oob, calculate ECC */
status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
@@ -675,7 +663,7 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
int page)
{
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Raw reads can just use the FIFO interface */
chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
@@ -698,6 +686,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
uint8_t *pb;
int error;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
/* Write data, calculate ECC on outbound data */
error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
if (error)
@@ -716,7 +706,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
/* Write ECC data to device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -729,9 +720,11 @@ static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
int oob_required, int page)
{
/* Raw writes can just use the FIFO interface */
- chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ nand_prog_page_begin_op(chip, page, 0, buf,
+ chip->ecc.size * chip->ecc.steps);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
new file mode 100644
index 000000000000..2196f2a233d6
--- /dev/null
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -0,0 +1,2896 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell NAND flash controller driver
+ *
+ * Copyright (C) 2017 Marvell
+ * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
+#define FIFO_DEPTH 8
+#define FIFO_REP(x) (x / sizeof(u32))
+#define BCH_SEQ_READS (32 / FIFO_DEPTH)
+/* NFC does not support transfers of larger chunks at a time */
+#define MAX_CHUNK_SIZE 2112
+/* NFCv1 cannot read more that 7 bytes of ID */
+#define NFCV1_READID_LEN 7
+/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+/* Maximum number of contiguous address cycles */
+#define MAX_ADDRESS_CYC_NFCV1 5
+#define MAX_ADDRESS_CYC_NFCV2 7
+/* System control registers/bits to enable the NAND controller on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
+#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
+#define GENCONF_CLK_GATING_CTRL 0x220
+#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
+#define GENCONF_ND_CLK_CTRL 0x700
+#define GENCONF_ND_CLK_CTRL_EN BIT(0)
+
+/* NAND controller data flash control register */
+#define NDCR 0x00
+#define NDCR_ALL_INT GENMASK(11, 0)
+#define NDCR_CS1_CMDDM BIT(7)
+#define NDCR_CS0_CMDDM BIT(8)
+#define NDCR_RDYM BIT(11)
+#define NDCR_ND_ARB_EN BIT(12)
+#define NDCR_RA_START BIT(15)
+#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+#define NDCR_DWIDTH_M BIT(26)
+#define NDCR_DWIDTH_C BIT(27)
+#define NDCR_ND_RUN BIT(28)
+#define NDCR_DMA_EN BIT(29)
+#define NDCR_ECC_EN BIT(30)
+#define NDCR_SPARE_EN BIT(31)
+#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
+ NDCR_DWIDTH_M | NDCR_DWIDTH_C))
+
+/* NAND interface timing parameter 0 register */
+#define NDTR0 0x04
+#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
+#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
+#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
+#define NDTR0_SEL_NRE_EDGE BIT(7)
+#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
+#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
+#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
+#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
+#define NDTR0_SELCNTR BIT(26)
+#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
+
+/* NAND interface timing parameter 1 register */
+#define NDTR1 0x0C
+#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
+#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
+#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
+#define NDTR1_PRESCALE BIT(14)
+#define NDTR1_WAIT_MODE BIT(15)
+#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
+
+/* NAND controller status register */
+#define NDSR 0x14
+#define NDSR_WRCMDREQ BIT(0)
+#define NDSR_RDDREQ BIT(1)
+#define NDSR_WRDREQ BIT(2)
+#define NDSR_CORERR BIT(3)
+#define NDSR_UNCERR BIT(4)
+#define NDSR_CMDD(cs) BIT(8 - cs)
+#define NDSR_RDY(rb) BIT(11 + rb)
+#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
+
+/* NAND ECC control register */
+#define NDECCCTRL 0x28
+#define NDECCCTRL_BCH_EN BIT(0)
+
+/* NAND controller data buffer register */
+#define NDDB 0x40
+
+/* NAND controller command buffer 0 register */
+#define NDCB0 0x48
+#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
+#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
+#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
+#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
+#define NDCB0_DBC BIT(19)
+#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
+#define NDCB0_CSEL BIT(24)
+#define NDCB0_RDY_BYP BIT(27)
+#define NDCB0_LEN_OVRD BIT(28)
+#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
+
+/* NAND controller command buffer 1 register */
+#define NDCB1 0x4C
+#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
+#define NDCB1_ADDRS_PAGE(x) (x << 16)
+
+/* NAND controller command buffer 2 register */
+#define NDCB2 0x50
+#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
+#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
+
+/* NAND controller command buffer 3 register */
+#define NDCB3 0x54
+#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
+#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
+
+/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
+#define TYPE_READ 0
+#define TYPE_WRITE 1
+#define TYPE_ERASE 2
+#define TYPE_READ_ID 3
+#define TYPE_STATUS 4
+#define TYPE_RESET 5
+#define TYPE_NAKED_CMD 6
+#define TYPE_NAKED_ADDR 7
+#define TYPE_MASK 7
+#define XTYPE_MONOLITHIC_RW 0
+#define XTYPE_LAST_NAKED_RW 1
+#define XTYPE_FINAL_COMMAND 3
+#define XTYPE_READ 4
+#define XTYPE_WRITE_DISPATCH 4
+#define XTYPE_NAKED_RW 5
+#define XTYPE_COMMAND_DISPATCH 6
+#define XTYPE_MASK 7
+
+/**
+ * The Marvell ECC engine works differently than the others: in order to limit
+ * the size of the IP, hardware engineers chose to set a fixed strength of 16
+ * bits per subpage. Depending on the desired strength needed by the NAND chip,
+ * a particular layout mixing data/spare/ecc is defined, with a possible last
+ * chunk smaller than the others.
+ *
+ * @writesize: Full page size on which the layout applies
+ * @chunk: Desired ECC chunk size on which the layout applies
+ * @strength: Desired ECC strength (per chunk size bytes) on which the
+ * layout applies
+ * @nchunks: Total number of chunks
+ * @full_chunk_cnt: Number of full-sized chunks, which is the number of
+ * repetitions of the pattern:
+ * (data_bytes + spare_bytes + ecc_bytes).
+ * @data_bytes: Number of data bytes per chunk
+ * @spare_bytes: Number of spare bytes per chunk
+ * @ecc_bytes: Number of ecc bytes per chunk
+ * @last_data_bytes: Number of data bytes in the last chunk
+ * @last_spare_bytes: Number of spare bytes in the last chunk
+ * @last_ecc_bytes: Number of ecc bytes in the last chunk
+ */
+struct marvell_hw_ecc_layout {
+ /* Constraints */
+ int writesize;
+ int chunk;
+ int strength;
+ /* Corresponding layout */
+ int nchunks;
+ int full_chunk_cnt;
+ int data_bytes;
+ int spare_bytes;
+ int ecc_bytes;
+ int last_data_bytes;
+ int last_spare_bytes;
+ int last_ecc_bytes;
+};
+
+#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
+ { \
+ .writesize = ws, \
+ .chunk = dc, \
+ .strength = ds, \
+ .nchunks = nc, \
+ .full_chunk_cnt = fcc, \
+ .data_bytes = db, \
+ .spare_bytes = sb, \
+ .ecc_bytes = eb, \
+ .last_data_bytes = ldb, \
+ .last_spare_bytes = lsb, \
+ .last_ecc_bytes = leb, \
+ }
+
+/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
+static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+ MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+};
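
As a side note, every MARVELL_LAYOUT() row above is self-consistent: the data bytes of the full chunks plus those of the last chunks must add up to the page size the layout targets. A minimal sketch of that invariant (hypothetical helper, not part of the driver), assuming only the structure defined above:

/* Sanity check: the chunk data sizes must cover exactly one page. */
static bool marvell_layout_is_consistent(const struct marvell_hw_ecc_layout *lt)
{
	int data = lt->full_chunk_cnt * lt->data_bytes +
		   (lt->nchunks - lt->full_chunk_cnt) * lt->last_data_bytes;

	return data == lt->writesize;
}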
+
+/**
+ * The NAND Flash Controller has up to 4 CE and 2 RB pins. The CE selection
+ * is made by a field in the NDCB0 register, and by another field in the
+ * NDCB2 register. The datasheet describes the logic with an error: the ADDR5
+ * field is once declared at the beginning of NDCB2, and another time at its
+ * end. Because the ADDR5 field of NDCB2 may be used by other bytes, it would
+ * be more logical to use the last bits of this field instead of the first ones.
+ *
+ * @cs: Wanted CE lane.
+ * @ndcb0_csel: Value of the NDCB0 register with or without the flag
+ * selecting the wanted CE lane. This is set once when
+ * the Device Tree is probed.
+ * @rb: Ready/Busy pin for the flash chip
+ */
+struct marvell_nand_chip_sel {
+ unsigned int cs;
+ u32 ndcb0_csel;
+ unsigned int rb;
+};
+
+/**
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @layout: NAND layout when using hardware ECC
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
+ * @addr_cyc: Number of address cycles needed for page operations
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @sels: Array of CS lines descriptions
+ */
+struct marvell_nand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ const struct marvell_hw_ecc_layout *layout;
+ u32 ndcr;
+ u32 ndtr0;
+ u32 ndtr1;
+ int addr_cyc;
+ int selected_die;
+ unsigned int nsels;
+ struct marvell_nand_chip_sel sels[0];
+};
+
+static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct marvell_nand_chip, chip);
+}
+
+static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
+ *nand)
+{
+ return &nand->sels[nand->selected_die];
+}
+
+/**
+ * NAND controller capabilities for distinction between compatible strings
+ *
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @need_system_controller: Indicates if the SoC needs to have access to the
+ * system controller (i.e. to enable the NAND controller)
+ * @legacy_of_bindings: Indicates if DT parsing must be done the old
+ * fashioned way
+ * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, i.e.
+ * BCH error detection and correction algorithm,
+ * NDCB3 register has been added
+ * @use_dma: Use dma for data transfers
+ */
+struct marvell_nfc_caps {
+ unsigned int max_cs_nb;
+ unsigned int max_rb_nb;
+ bool need_system_controller;
+ bool legacy_of_bindings;
+ bool is_nfcv2;
+ bool use_dma;
+};
+
+/**
+ * NAND controller structure: stores Marvell NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ecc_clk: ECC block clock, two times the NAND controller clock
+ * @complete: Completion object to wait for NAND controller events
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @chips: List containing all the NAND chips attached to
+ * this NAND controller
+ * @selected_chip: Currently selected target chip
+ * @caps: NAND controller capabilities for each compatible string
+ * @use_dma: Whether DMA is used for data transfers (NFCv1 only)
+ * @dma_chan: DMA channel (NFCv1 only)
+ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
+ */
+struct marvell_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ecc_clk;
+ struct completion complete;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ const struct marvell_nfc_caps *caps;
+
+ /* DMA (NFCv1 only) */
+ bool use_dma;
+ struct dma_chan *dma_chan;
+ u8 *dma_buf;
+};
+
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct marvell_nfc, controller);
+}
+
+/**
+ * NAND controller timings expressed in NAND Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct marvell_nfc_timings {
+ /* NDTR0 fields */
+ unsigned int tRP;
+ unsigned int tRH;
+ unsigned int tWP;
+ unsigned int tWH;
+ unsigned int tCS;
+ unsigned int tCH;
+ unsigned int tADL;
+ /* NDTR1 fields */
+ unsigned int tAR;
+ unsigned int tWHR;
+ unsigned int tRHW;
+ unsigned int tR;
+};
+
+/**
+ * Derives a duration as a number of clock cycles.
+ *
+ * @ps: Duration in pico-seconds
+ * @period_ns: Clock period in nano-seconds
+ *
+ * Converts the duration to nano-seconds, then divides by the period and
+ * returns the number of clock periods.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
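
For instance, with an assumed 10 ns controller clock period, a 70 ns (70000 ps) tADL yields DIV_ROUND_UP(70, 10) = 7 cycles. A hypothetical sketch of how the macro could feed the NDTR0 fields defined above (the timing values are illustrative only, not taken from a datasheet):

static u32 marvell_nfc_example_ndtr0(void)
{
	const unsigned int period_ns = 10;	/* assumed NFC clock period */

	/* 70 ns tADL -> 7 cycles, 15 ns tWP -> 2 cycles */
	return NDTR0_TADL(TO_CYCLES(70000, period_ns)) |
	       NDTR0_TWP(TO_CYCLES(15000, period_ns));
}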
+
+/**
+ * NAND driver structure filled during the parsing of the ->exec_op() subop
+ * subset of instructions.
+ *
+ * @ndcb: Array of values written to NDCBx registers
+ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns: Optional delay after waiting for the RB pin
+ * @data_delay_ns: Optional delay after the data xfer
+ * @data_instr_idx: Index of the data instruction in the subop
+ * @data_instr: Pointer to the data instruction in the subop
+ */
+struct marvell_nfc_op {
+ u32 ndcb[4];
+ unsigned int cle_ale_delay_ns;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int data_delay_ns;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/*
+ * Internal helper to conditionally apply a delay (from the above structure,
+ * most of the time).
+ */
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/*
+ * The controller has many flags that could generate interrupts; most of them
+ * are disabled and polling is used instead. For the very slow signals, using
+ * interrupts may reduce the CPU load.
+ */
+static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 1 disables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 0 enables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ writel_relaxed(int_mask, nfc->regs + NDSR);
+}
+
+static void marvell_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr;
+
+ /*
+ * Callers of this function do not verify if the NAND is using a 16-bit
+ * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (force_8bit)
+ ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
+ else
+ ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ writel_relaxed(ndcr, nfc->regs + NDCR);
+}
+
+static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ /*
+ * The command is being processed, wait for the ND_RUN bit to be
+ * cleared by the NFC. If not, we must clear it by hand.
+ */
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
+ (val & NDCR_ND_RUN) == 0,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Any time a command has to be sent to the controller, the following sequence
+ * has to be followed:
+ * - call marvell_nfc_prepare_cmd()
+ * -> activate the ND_RUN bit that will kind of 'start a job'
+ * -> wait for the signal indicating the NFC is waiting for a command
+ * - send the command (cmd and address cycles)
+ * - optionally send or receive the data
+ * - call marvell_nfc_end_cmd() with the corresponding flag
+ * -> wait for the flag to be triggered or cancel the job with a timeout
+ *
+ * The following helpers are here to factorize the code a bit so that
+ * specialized functions responsible for executing the actual NAND
+ * operations do not have to replicate the same code blocks.
+ */
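
As a concrete illustration of this sequence, a minimal sketch of a READ STATUS cycle built from the helpers in this file (the NDCB0 encoding and the one-byte PIO drain are assumptions made for the example, not driver code):

static int marvell_nfc_example_read_status(struct nand_chip *chip, u8 *status)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	struct marvell_nfc_op nfc_op = {
		.ndcb[0] = NDCB0_CMD_TYPE(TYPE_STATUS) |
			   NDCB0_CMD1(NAND_CMD_STATUS),
	};
	int ret;

	/* Assert ND_RUN and wait for the NFC to expect a command */
	ret = marvell_nfc_prepare_cmd(chip);
	if (ret)
		return ret;

	/* Send the command (cmd and address cycles) */
	marvell_nfc_send_cmd(chip, &nfc_op);

	/* Wait for the data-request flag, then drain the status byte */
	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
				  "RDDREQ while reading status");
	if (ret)
		return ret;

	marvell_nfc_xfer_data_in_pio(nfc, status, 1);

	/* Wait for the command-done flag before returning */
	return marvell_nfc_wait_cmdd(chip);
}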
+static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr, val;
+ int ret;
+
+ /* Poll ND_RUN and clear NDSR before issuing any command */
+ ret = marvell_nfc_wait_ndrun(chip);
+ if (ret) {
+ dev_err(nfc->dev, "Last operation did not succeed\n");
+ return ret;
+ }
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
+
+ /* Assert ND_RUN bit and wait the NFC to be ready */
+ writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & NDSR_WRCMDREQ,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on WRCMDRE\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Command may be written, clear WRCMDREQ status bit */
+ writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static void marvell_nfc_send_cmd(struct nand_chip *chip,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
+ "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
+ (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
+ nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
+
+ writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
+ nfc->regs + NDCB0);
+ writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
+ writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
+
+ /*
+ * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
+ * fields are used (only available on NFCv2).
+ */
+ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
+ NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
+ if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
+ writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
+ }
+}
+
+static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
+ const char *label)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & flag,
+ POLL_PERIOD, POLL_TIMEOUT);
+
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
+ label, val);
+ if (nfc->dma_chan)
+ dmaengine_terminate_all(nfc->dma_chan);
+ return ret;
+ }
+
+ /*
+ * The DMA function uses this helper to poll on the CMDD bits without
+ * wanting them to be cleared.
+ */
+ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
+ return 0;
+
+ writel_relaxed(flag, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
+
+ return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
+}
+
+static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ /* Timeout is expressed in ms */
+ if (!timeout_ms)
+ timeout_ms = IRQ_TIMEOUT;
+
+ init_completion(&nfc->complete);
+
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+ if (!ret) {
+ dev_err(nfc->dev, "Timeout waiting for RB signal\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr_generic;
+
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
+ if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
+ nfc->selected_chip = NULL;
+ marvell_nand->selected_die = -1;
+ return;
+ }
+
+ /*
+ * Do not change the timing registers when using the DT property
+ * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 from the
+ * marvell_nand structure are supposedly empty.
+ */
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
+ /*
+ * Reset the NDCR register to a clean state for this particular chip,
+ * also clear ND_RUN bit.
+ */
+ ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
+ NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
+ writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
+
+ /* Also reset the interrupt status register */
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+
+ nfc->selected_chip = chip;
+ marvell_nand->selected_die = die_nr;
+}
+
+static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
+{
+ struct marvell_nfc *nfc = dev_id;
+ u32 st = readl_relaxed(nfc->regs + NDSR);
+ u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
+
+ /*
+ * RDY interrupt mask is one bit in NDCR while there are two status
+ * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
+ */
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if (!(st & ien))
+ return IRQ_NONE;
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+ if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+/* HW ECC related functions */
+static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (!(ndcr & NDCR_ECC_EN)) {
+ writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
+
+ /*
+ * When enabling BCH, set threshold to 0 to always know the
+ * number of corrected bitflips.
+ */
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
+ }
+}
+
+static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (ndcr & NDCR_ECC_EN) {
+ writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+ }
+}
+
+/* DMA related helpers */
+static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+/* Read/write PIO/DMA accessors */
+static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
+ enum dma_data_direction direction,
+ unsigned int len)
+{
+ unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ dma_cookie_t cookie;
+ int ret;
+
+ marvell_nfc_enable_dma(nfc);
+ /* Prepare the DMA transfer */
+ sg_init_one(&sg, nfc->dma_buf, dma_len);
+ dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+ direction == DMA_FROM_DEVICE ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ return -ENXIO;
+ }
+
+ /* Do the task and wait for it to finish */
+ cookie = dmaengine_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return -EIO;
+
+ dma_async_issue_pending(nfc->dma_chan);
+ ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ marvell_nfc_disable_dma(nfc);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
+ dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
+ dmaengine_terminate_all(nfc->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ memcpy(in + last_full_offset, tmp_buf, last_len);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ memcpy(tmp_buf, out + last_full_offset, last_len);
+ iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
+ u8 *data, int data_len,
+ u8 *spare, int spare_len,
+ u8 *ecc, int ecc_len,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bf;
+
+ /*
+ * Blank pages (all 0xFF) that have not been written may be recognized
+ * as bad if bitflips occur, so whenever an uncorrectable error occurs,
+ * check if the entire page (with ECC bytes) is actually blank or not.
+ */
+ if (!data)
+ data_len = 0;
+ if (!spare)
+ spare_len = 0;
+ if (!ecc)
+ ecc_len = 0;
+
+ bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
+ spare, spare_len, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ return;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+}
+
+/*
+ * Check whether a chunk is correct or not according to the hardware ECC
+ * engine. mtd->ecc_stats.corrected is updated, as well as max_bitflips;
+ * however, mtd->ecc_stats.failed is not: the function will instead return a
+ * non-zero value indicating that a check on the emptiness of the subpage
+ * must be performed before declaring the subpage corrupted.
+ */
+static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int bf = 0;
+ u32 ndsr;
+
+ ndsr = readl_relaxed(nfc->regs + NDSR);
+
+ /* Check uncorrectable error flag */
+ if (ndsr & NDSR_UNCERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ /*
+ * Do not increment ->ecc_stats.failed now, instead, return a
+ * non-zero value to indicate that this chunk was apparently
+ * bad, and it should be checked to see if it is empty or not. If
+ * the chunk (with ECC bytes) is not declared empty, the calling
+ * function must increment the failure count.
+ */
+ return -EBADMSG;
+ }
+
+ /* Check correctable error flag */
+ if (ndsr & NDSR_CORERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ bf = NDSR_ERRCNT(ndsr);
+ else
+ bf = 1;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+
+ return 0;
+}
+
+/* Hamming read helpers */
+static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf,
+ bool raw, int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART),
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data/oob)");
+ if (ret)
+ return ret;
+
+ /*
+ * Read the page then the OOB area. Unlike what is shown in current
+ * documentation, spare bytes are protected by the ECC engine, and must
+ * be at the beginning of the OOB area, otherwise running this driver on
+ * legacy systems will prevent the discovery of the BBM/BBT.
+ */
+ if (nfc->use_dma) {
+ marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
+ lt->data_bytes + oob_bytes);
+ memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
+ memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
+ } else {
+ marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int max_bitflips = 0, ret;
+ u8 *raw_buf;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
+ page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!ret)
+ return max_bitflips;
+
+ /*
+ * When ECC failures are detected, check if the full page has been
+ * written or not. Ignore the failure if it is actually empty.
+ */
+ raw_buf = kmalloc(full_sz, GFP_KERNEL);
+ if (!raw_buf)
+ return -ENOMEM;
+
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
+ lt->data_bytes, true, page);
+ marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
+ &max_bitflips);
+ kfree(raw_buf);
+
+ return max_bitflips;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * though it appears before the ECC bytes when reading), so the
+ * ->read_oob_raw() function also stands for ->read_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* Hamming write helpers */
+static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ const u8 *data_buf,
+ const u8 *oob_buf, bool raw,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN) |
+ NDCB0_CMD2(NAND_CMD_PAGEPROG) |
+ NDCB0_DBC,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Write the page then the OOB area */
+ if (nfc->use_dma) {
+ memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
+ memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
+ marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
+ lt->ecc_bytes + lt->spare_bytes);
+ } else {
+ marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ false, page);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ return ret;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * if it appears before the ECC bytes when reading), so the ->write_oob_raw()
+ * function also stands for ->write_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* BCH read helpers */
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u8 *oob = chip->oob_poi;
+ int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int chunk;
+
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update last chunk length */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+		/* Read data bytes */
+ nand_change_read_column_op(chip, chunk * chunk_size,
+ buf + (lt->data_bytes * chunk),
+ data_len, false);
+
+ /* Read spare bytes */
+ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
+ spare_len, false);
+
+ /* Read ECC bytes */
+ nand_read_data_op(chip, oob + ecc_offset +
+ (ALIGN(lt->ecc_bytes, 32) * chunk),
+ ecc_len, false);
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ u8 *data, unsigned int data_len,
+ u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int i, ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_LEN_OVRD,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return;
+
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART);
+
+ /*
+ * Trigger the naked read operation only on the last chunk.
+ * Otherwise, use monolithic read.
+ */
+ if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+	/*
+	 * According to the datasheet, when reading from NDDB
+	 * with BCH enabled, after each 32-byte read, we
+	 * have to make sure that the NDSR.RDDREQ bit is set.
+	 *
+	 * Drain the FIFO, 8 32-bit reads at a time, and skip
+	 * the polling on the last read.
+	 *
+	 * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
+	 */
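+	/*
+	 * As an illustration, assuming FIFO_DEPTH is 8 and BCH_SEQ_READS is
+	 * 32 / FIFO_DEPTH = 4 (as defined earlier in this driver), each
+	 * iteration below drains 8 * 4 = 32 bytes, i.e. eight 32-bit reads.
+	 */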
+ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data)");
+ marvell_nfc_xfer_data_in_pio(nfc, data,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ data += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+
+ for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (OOB)");
+ marvell_nfc_xfer_data_in_pio(nfc, spare,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ spare += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+}
+
+static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
+ u8 *data = buf, *spare = chip->oob_poi, *ecc;
+ int max_bitflips = 0;
+ u32 failure_mask = 0;
+ int chunk, ecc_offset_in_page, ret;
+
+	/*
+	 * With BCH, the OOB area is not fully used (and thus not read
+	 * entirely): unexpected bytes could show up at the end of the OOB
+	 * buffer if it is not explicitly erased.
+	 */
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update length for the last chunk */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ /* Read the chunk and detect number of bitflips */
+ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ if (ret)
+ failure_mask |= BIT(chunk);
+
+ data += data_len;
+ spare += spare_len;
+ }
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!failure_mask)
+ return max_bitflips;
+
+	/*
+	 * Please note that dumping the ECC bytes during a normal read with OOB
+	 * area would add a significant overhead as ECC bytes are "consumed" by
+	 * the controller in normal mode and must be re-read in raw mode. To
+	 * avoid hurting performance, we prefer not to include them. The user
+	 * should re-read the page in raw mode if ECC bytes are required.
+	 *
+	 * However, for any subpage read error reported by ->correct(), the ECC
+	 * bytes must be read in raw mode and the full subpage must be checked
+	 * to see if it is entirely empty or if there was an actual error.
+	 */
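+	/*
+	 * In-page layout of each chunk is: data | spare | ECC. As a
+	 * hypothetical example, with 2048B data, 32B spare and 30B ECC per
+	 * chunk, the ECC bytes of chunk 1 would start at in-page offset
+	 * 1 * (2048 + 32 + 30) + 2048 + 32 = 4190, which is what the
+	 * computation below derives.
+	 */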
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* No failure reported for this chunk, move to the next one */
+ if (!(failure_mask & BIT(chunk)))
+ continue;
+
+ /* Derive ECC bytes positions (in page/buffer) and length */
+ ecc = chip->oob_poi +
+ (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * ALIGN(lt->ecc_bytes, 32));
+ ecc_offset_in_page =
+ (chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes)) +
+ (chunk < lt->full_chunk_cnt ?
+ lt->data_bytes + lt->spare_bytes :
+ lt->last_data_bytes + lt->last_spare_bytes);
+ ecc_len = chunk < lt->full_chunk_cnt ?
+ lt->ecc_bytes : lt->last_ecc_bytes;
+
+ /* Do the actual raw read of the ECC bytes */
+ nand_change_read_column_op(chip, ecc_offset_in_page,
+ ecc, ecc_len, false);
+
+ /* Derive data/spare bytes positions (in buffer) and length */
+ data = buf + (chunk * lt->data_bytes);
+ data_len = chunk < lt->full_chunk_cnt ?
+ lt->data_bytes : lt->last_data_bytes;
+ spare = chip->oob_poi + (chunk * (lt->spare_bytes +
+ lt->ecc_bytes));
+ spare_len = chunk < lt->full_chunk_cnt ?
+ lt->spare_bytes : lt->last_spare_bytes;
+
+		/* Check the entire chunk (data + spare + ecc) for emptiness */
+ marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
+ spare_len, ecc, ecc_len,
+ &max_bitflips);
+ }
+
+ return max_bitflips;
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* BCH write helpers */
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int spare_offset = 0;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int chunk;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Point to the column of the next chunk */
+ nand_change_write_column_op(chip, chunk * full_chunk_size,
+ NULL, 0, false);
+
+ /* Write the data */
+ nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
+ data_len, false);
+
+ if (!oob_required)
+ continue;
+
+ /* Write the spare bytes */
+ if (spare_len)
+ nand_write_data_op(chip, chip->oob_poi + spare_offset,
+ spare_len, false);
+
+ /* Write the ECC bytes */
+ if (ecc_len)
+ nand_write_data_op(chip, chip->oob_poi + ecc_offset,
+ ecc_len, false);
+
+ spare_offset += spare_len;
+ ecc_offset += ALIGN(ecc_len, 32);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int
+marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ const u8 *data, unsigned int data_len,
+ const u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+ .ndcb[3] = data_len + spare_len,
+ };
+
+	/*
+	 * The first operation dispatches the CMD_SEQIN command, issues the
+	 * address cycles and asks for the first chunk of data.
+	 * All operations in the middle (if any) will issue a naked write and
+	 * also ask for data.
+	 * The last operation (if any) asks for the last chunk of data through
+	 * a last naked write.
+	 */
+ if (chunk == 0) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+ nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
+ } else if (chunk < lt->nchunks - 1) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ } else {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ }
+
+ /* Always dispatch the PAGEPROG command on the last chunk */
+ if (chunk == lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Transfer the contents */
+ iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
+ iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ const u8 *data = buf;
+ const u8 *spare = chip->oob_poi;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int chunk, ret;
+
+	/* Spare data will be written anyway, so fill it with 0xFF to avoid writing garbage */
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ data += data_len;
+ spare += spare_len;
+
+		/*
+		 * Waiting only for CMDD or PAGED is not enough, as the ECC
+		 * bytes would only be partially written. No flag is set once
+		 * the operation is really finished, but the ND_RUN bit is
+		 * cleared, so wait for it before stepping into the next
+		 * command.
+		 */
+ marvell_nfc_wait_ndrun(chip);
+ }
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void marvell_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ bool first_cmd = true;
+ unsigned int op_id;
+ int i;
+
+ /* Reset the input structure as most of its fields will be OR'ed */
+ memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ int len = nand_subop_get_data_len(subop, op_id);
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD2(instr->ctx.cmd.opcode) |
+ NDCB0_DBC;
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
+
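+			/*
+			 * Pack the first four address cycles into NDCB1, one
+			 * byte per cycle; the fifth to seventh cycles go into
+			 * NDCB2/NDCB3 below.
+			 */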
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ nfc_op->ndcb[1] |= addrs[i] << (8 * i);
+
+ if (naddrs >= 5)
+ nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
+ if (naddrs >= 6)
+ nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
+ if (naddrs == 7)
+ nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
+
+static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct nand_op_instr *instr = nfc_op->data_instr;
+ unsigned int op_id = nfc_op->data_instr_idx;
+ unsigned int len = nand_subop_get_data_len(subop, op_id);
+ unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
+ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
+ int ret;
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, true);
+
+ if (reading) {
+ u8 *in = instr->ctx.data.buf.in + offset;
+
+ ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
+ } else {
+ const u8 *out = instr->ctx.data.buf.out + offset;
+
+ ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
+ }
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, false);
+
+ return ret;
+}
+
+static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ bool reading;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ if (!reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (!reading) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+	/*
+	 * Naked accesses are different in that they need to be flagged as
+	 * naked by the controller. Reset the controller register fields that
+	 * describe the operation type and refill them according to the
+	 * ongoing operation.
+	 */
+ nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
+ NDCB0_CMD_XTYPE(XTYPE_MASK));
+ switch (subop->instrs[0].type) {
+ case NAND_OP_CMD_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ default:
+ /* This should never happen */
+ break;
+ }
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ if (!nfc_op.data_instr) {
+ ret = marvell_nfc_wait_cmdd(chip);
+ cond_delay(nfc_op.cle_ale_delay_ns);
+ return ret;
+ }
+
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return ret;
+}
+
+static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading ID");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading status");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
+ /* Monolithic reads/writes */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ /* Naked commands */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
+ /* Naked commands not supported, use a function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_erase_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int marvell_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ if (nfc->caps->is_nfcv2)
+ return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
+ op, check_only);
+ else
+ return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
+ op, check_only);
+}
+
+/*
+ * Layouts were broken in the old pxa3xx_nand driver; these are supposed to be
+ * usable.
+ */
+static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
+ lt->last_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+	/*
+	 * The bootROM looks at bytes 0 and 5 for bad block markers with the
+	 * 4KB page / 4-bit BCH combination.
+	 */
+ if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
+ oobregion->offset = 6;
+ else
+ oobregion->offset = 2;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
+ .ecc = marvell_nand_ooblayout_ecc,
+ .free = marvell_nand_ooblayout_free,
+};
+
+static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *l;
+ int i;
+
+ if (!nfc->caps->is_nfcv2 &&
+ (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
+ dev_err(nfc->dev,
+ "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
+ mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
+ return -ENOTSUPP;
+ }
+
+ to_marvell_nand(chip)->layout = NULL;
+ for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
+ l = &marvell_nfc_layouts[i];
+ if (mtd->writesize == l->writesize &&
+ ecc->size == l->chunk && ecc->strength == l->strength) {
+ to_marvell_nand(chip)->layout = l;
+ break;
+ }
+ }
+
+ if (!to_marvell_nand(chip)->layout ||
+ (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
+ dev_err(nfc->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ ecc->strength, mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
+ ecc->steps = l->nchunks;
+ ecc->size = l->data_bytes;
+
+ if (ecc->strength == 1) {
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
+ ecc->read_oob = ecc->read_oob_raw;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
+ ecc->write_oob = ecc->write_oob_raw;
+ } else {
+ chip->ecc.algo = NAND_ECC_BCH;
+ ecc->strength = 16;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
+ ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
+ ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
+ }
+
+ return 0;
+}
+
+static int marvell_nand_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
+ if (chip->ecc_step_ds && chip->ecc_strength_ds) {
+ ecc->size = chip->ecc_step_ds;
+ ecc->strength = chip->ecc_strength_ds;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ ecc->size = 512;
+ ecc->strength = 1;
+ }
+ }
+
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
+ mtd->writesize != SZ_2K) {
+ dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
+ mtd->writesize);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface
+ *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ unsigned int period_ns = 1000000000 / clk_get_rate(nfc->ecc_clk) * 2;
+ const struct nand_sdr_timings *sdr;
+ struct marvell_nfc_timings nfc_tmg;
+ int read_delay;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+	/*
+	 * SDR timings are given in picoseconds while NFC timings must be
+	 * expressed in NAND controller clock cycles, whose frequency is half
+	 * that of the accessible ECC clock retrieved by clk_get_rate(). This
+	 * is not written anywhere in the datasheet but was observed with an
+	 * oscilloscope.
+	 *
+	 * The NFC datasheet gives equations from which these calculations are
+	 * derived; they tend to be slightly more restrictive than the given
+	 * core timings and may improve the overall speed.
+	 */
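+	/*
+	 * Worked example, assuming a 250MHz ECC clock (period_ns = 8) and
+	 * TO_CYCLES() rounding picoseconds up to whole clock cycles: with
+	 * tRC_min = 30000ps, tRP = TO_CYCLES(15000, 8) - 1 = 2 - 1 = 1.
+	 */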
+ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+ nfc_tmg.tRH = nfc_tmg.tRP;
+ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+ nfc_tmg.tWH = nfc_tmg.tWP;
+ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+ /*
+ * Read delay is the time of propagation from SoC pins to NFC internal
+ * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
+ * EDO mode, an additional delay of tRH must be taken into account so
+ * the data is sampled on the falling edge instead of the rising edge.
+ */
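+	/* EDO mode is used for tRC below 30ns (tRC_min is given in ps) */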
+ read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+	/*
+	 * tWHR and tRHW are supposed to be read-to-write delays (and vice
+	 * versa) but in some cases, e.g. when doing a change column, they
+	 * must be greater than that to be sure the tCCS delay is respected.
+	 */
+	nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+				 period_ns) - 2;
+ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+ period_ns);
+
+ /* Use WAIT_MODE (wait for RB line) instead of only relying on delays */
+ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+
+ if (chipnr < 0)
+ return 0;
+
+ marvell_nand->ndtr0 =
+ NDTR0_TRP(nfc_tmg.tRP) |
+ NDTR0_TRH(nfc_tmg.tRH) |
+ NDTR0_ETRP(nfc_tmg.tRP) |
+ NDTR0_TWP(nfc_tmg.tWP) |
+ NDTR0_TWH(nfc_tmg.tWH) |
+ NDTR0_TCS(nfc_tmg.tCS) |
+ NDTR0_TCH(nfc_tmg.tCH) |
+ NDTR0_RD_CNT_DEL(read_delay) |
+ NDTR0_SELCNTR |
+ NDTR0_TADL(nfc_tmg.tADL);
+
+ marvell_nand->ndtr1 =
+ NDTR1_TAR(nfc_tmg.tAR) |
+ NDTR1_TWHR(nfc_tmg.tWHR) |
+ NDTR1_TRHW(nfc_tmg.tRHW) |
+ NDTR1_WAIT_MODE |
+ NDTR1_TR(nfc_tmg.tR);
+
+ return 0;
+}
+
+static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ struct device_node *np)
+{
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct marvell_nand_chip *marvell_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs, rb;
+
+	/*
+	 * The legacy "num-cs" property indicates the number of CS on the only
+	 * chip connected to the controller (legacy bindings do not support
+	 * more than one chip). CS ids are simply incremented one by one while
+	 * the RB pin is always #0.
+	 *
+	 * When not using legacy bindings, a pair of "reg" and "nand-rb"
+	 * properties must be filled in. For each chip, expressed as a subnode,
+	 * "reg" points to the CS lines and "nand-rb" to the RB line.
+	 */
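+	/*
+	 * A (hypothetical) chip subnode using the new bindings could thus
+	 * look like:
+	 *
+	 *	nand@0 {
+	 *		reg = <0>;
+	 *		nand-rb = <0>;
+	 *	};
+	 */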
+ if (pdata) {
+ nsels = 1;
+ } else if (nfc->caps->legacy_of_bindings &&
+ !of_get_property(np, "num-cs", &nsels)) {
+ dev_err(dev, "missing num-cs property\n");
+ return -EINVAL;
+ } else if (!of_get_property(np, "reg", &nsels)) {
+ dev_err(dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ if (!pdata)
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ /* Alloc the nand chip structure */
+ marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
+ (nsels *
+ sizeof(struct marvell_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!marvell_nand) {
+ dev_err(dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ marvell_nand->nsels = nsels;
+ marvell_nand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * Legacy bindings use the CS lines in natural
+ * order (0, 1, ...)
+ */
+ cs = i;
+ } else {
+ /* Retrieve CS id */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (cs >= nfc->caps->max_cs_nb) {
+ dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
+ cs, nfc->caps->max_cs_nb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+		/*
+		 * The cs variable represents the chip select id, which must be
+		 * converted into bit fields for NDCB0 and NDCB2 to select the
+		 * right chip. Unfortunately, due to a lack of information on
+		 * the subject and inconsistent documentation, the user should
+		 * not use CS1 and CS3 at all, as asserting them is not
+		 * supported in a reliable way (due to multiplexing inside the
+		 * ADDR5 field).
+		 */
+ marvell_nand->sels[i].cs = cs;
+ switch (cs) {
+ case 0:
+ case 2:
+ marvell_nand->sels[i].ndcb0_csel = 0;
+ break;
+ case 1:
+ case 3:
+ marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Retrieve RB id */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /* Legacy bindings always use RB #0 */
+ rb = 0;
+ } else {
+ ret = of_property_read_u32_index(np, "nand-rb", i,
+ &rb);
+ if (ret) {
+ dev_err(dev,
+ "could not retrieve RB property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (rb >= nfc->caps->max_rb_nb) {
+			dev_err(dev, "invalid rb value: %u (max RB = %d)\n",
+ rb, nfc->caps->max_rb_nb);
+ return -EINVAL;
+ }
+
+ marvell_nand->sels[i].rb = rb;
+ }
+
+ chip = &marvell_nand->chip;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->exec_op = marvell_nfc_exec_op;
+ chip->select_chip = marvell_nfc_select_chip;
+ if (nfc->caps->is_nfcv2 &&
+ !of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->setup_data_interface = marvell_nfc_setup_data_interface;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ /*
+ * Save a reference value for timing registers before
+ * ->setup_data_interface() is called.
+ */
+ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
+ marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
+
+ chip->options |= NAND_BUSWIDTH_AUTO;
+ ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
+ if (ret) {
+ dev_err(dev, "could not identify the nand chip\n");
+ return ret;
+ }
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+	/*
+	 * Now add the number of cycles needed to pass the row
+	 * address.
+	 *
+	 * Addressing a chip using CS 2 or 3 should also need the third row
+	 * cycle, but due to inconsistencies in the documentation and the lack
+	 * of hardware to test this situation, this case is not supported.
+	 */
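+	/*
+	 * As an example, a large page chip using three row cycles ends up
+	 * with addr_cyc = 2 + 3 = 5 address cycles in total.
+	 */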
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+		/*
+		 * Subpage write is not available with hardware ECC; prohibit
+		 * subpage read as well, since subpage access would otherwise
+		 * still be allowed from userspace and subpage writes, if used,
+		 * would lead to numerous uncorrectable ECC errors.
+		 */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+		 * should define the following property in your NAND node, e.g.:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata)
+ /* Legacy bindings support only one chip */
+ ret = mtd_device_register(mtd, pdata->parts[0],
+ pdata->nr_parts[0]);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&marvell_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = nfc->caps->max_cs_nb;
+ int nchips;
+ int ret;
+
+ if (!np)
+ nchips = 1;
+ else
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
+ max_cs);
+ return -EINVAL;
+ }
+
+	/*
+	 * Legacy bindings do not use child nodes to describe NAND chip
+	 * properties and layout. Instead, NAND properties are mixed with the
+	 * controller ones, and partitions are defined as direct subnodes of
+	 * the NAND controller node.
+	 */
+ if (nfc->caps->legacy_of_bindings) {
+ ret = marvell_nand_chip_init(dev, nfc, np);
+ return ret;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = marvell_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ nand_release(nand_to_mtd(&entry->chip));
+ list_del(&entry->node);
+ }
+}
+
+static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
+{
+ struct platform_device *pdev = container_of(nfc->dev,
+ struct platform_device,
+ dev);
+ struct dma_slave_config config = {};
+ struct resource *r;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PXA_DMA)) {
+ dev_warn(nfc->dev,
+ "DMA not enabled in configuration\n");
+ return -ENOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(nfc->dev, "No resource defined for data DMA\n");
+ return -ENXIO;
+ }
+
+ param.drcmr = r->start;
+ param.prio = PXAD_PRIO_LOWEST;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ nfc->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, nfc->dev,
+ "data");
+ if (!nfc->dma_chan) {
+ dev_err(nfc->dev,
+ "Unable to request data DMA channel\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENXIO;
+
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = r->start + NDDB;
+ config.dst_addr = r->start + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(nfc->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(nfc->dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
+
+	/*
+	 * DMA transfer lengths must be a multiple of 32 bytes and may be
+	 * bigger than the destination buffer. Use this bounce buffer for DMA
+	 * transfers and then copy the desired amount of data to the provided
+	 * buffer.
+	 */
+ nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!nfc->dma_buf)
+ return -ENOMEM;
+
+ nfc->use_dma = true;
+
+ return 0;
+}
+
+static int marvell_nfc_init(struct marvell_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node;
+
+	/*
+	 * Some SoCs like A7k/A8k need the NAND controller, gated clocks and
+	 * reset bits to be enabled manually to avoid depending on the
+	 * bootloader. This is done through the System Functions registers.
+	 */
+ if (nfc->caps->need_system_controller) {
+ struct regmap *sysctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+
+ regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
+ reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
+ regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
+
+ regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
+ reg |= GENCONF_ND_CLK_CTRL_EN;
+ regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
+ }
+
+ /* Configure the DMA if appropriate */
+ if (!nfc->caps->is_nfcv2)
+ marvell_nfc_init_dma(nfc);
+
+	/*
+	 * ECC operations and interrupts are only enabled when specifically
+	 * needed. ECC shall not be activated in the early stages (it makes
+	 * probe fail). The arbiter flag, even if marked as "reserved", must be
+	 * set (empirically determined). The SPARE_EN bit must always be set or
+	 * the ECC bytes will not be at the expected offset in the read page,
+	 * which breaks the protection.
+	 */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+
+ return 0;
+}
+
+static int marvell_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct marvell_nfc *nfc;
+ int ret;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_hw_control_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ecc_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nfc->ecc_clk))
+ return PTR_ERR(nfc->ecc_clk);
+
+ ret = clk_prepare_enable(nfc->ecc_clk);
+ if (ret)
+ return ret;
+
+ marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ ret = devm_request_irq(dev, irq, marvell_nfc_isr,
+ 0, "marvell-nfc", nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ /* Get NAND controller capabilities */
+ if (pdev->id_entry)
+ nfc->caps = (void *)pdev->id_entry->driver_data;
+ else
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+
+ if (!nfc->caps) {
+ dev_err(dev, "Could not retrieve NFC caps\n");
+ ret = -EINVAL;
+ goto unprepare_clk;
+ }
+
+ /* Init the controller and then probe the chips */
+ ret = marvell_nfc_init(nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = marvell_nand_chips_init(dev, nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ return 0;
+
+unprepare_clk:
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return ret;
+}
+
+static int marvell_nfc_remove(struct platform_device *pdev)
+{
+ struct marvell_nfc *nfc = platform_get_drvdata(pdev);
+
+ marvell_nand_chips_cleanup(nfc);
+
+ if (nfc->use_dma) {
+ dmaengine_terminate_all(nfc->dma_chan);
+ dma_release_channel(nfc->dma_chan);
+ }
+
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return 0;
+}
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .use_dma = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .legacy_of_bindings = true,
+ .use_dma = true,
+};
+
+static const struct platform_device_id marvell_nfc_platform_ids[] = {
+ {
+ .name = "pxa3xx-nand",
+ .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
+
+static const struct of_device_id marvell_nfc_of_ids[] = {
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = &marvell_armada_8k_nfc_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand-controller",
+ .data = &marvell_armada370_nfc_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand-controller",
+ .data = &marvell_pxa3xx_nfc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = &marvell_armada_8k_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = &marvell_armada370_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = &marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
+
+static struct platform_driver marvell_nfc_driver = {
+ .driver = {
+ .name = "marvell-nfc",
+ .of_match_table = marvell_nfc_of_ids,
+ },
+ .id_table = marvell_nfc_platform_ids,
+ .probe = marvell_nfc_probe,
+ .remove = marvell_nfc_remove,
+};
+module_platform_driver(marvell_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index c51d214d169e..40d86a861a70 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -34,34 +34,28 @@
#define ECC_ENCCON (0x00)
#define ECC_ENCCNFG (0x04)
-#define ECC_MODE_SHIFT (5)
#define ECC_MS_SHIFT (16)
#define ECC_ENCDIADDR (0x08)
#define ECC_ENCIDLE (0x0C)
-#define ECC_ENCIRQ_EN (0x80)
-#define ECC_ENCIRQ_STA (0x84)
#define ECC_DECCON (0x100)
#define ECC_DECCNFG (0x104)
#define DEC_EMPTY_EN BIT(31)
#define DEC_CNFG_CORRECT (0x3 << 12)
#define ECC_DECIDLE (0x10C)
#define ECC_DECENUM0 (0x114)
-#define ECC_DECDONE (0x124)
-#define ECC_DECIRQ_EN (0x200)
-#define ECC_DECIRQ_STA (0x204)
#define ECC_TIMEOUT (500000)
#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
-#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
- ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
struct mtk_ecc_caps {
u32 err_mask;
const u8 *ecc_strength;
+ const u32 *ecc_regs;
u8 num_ecc_strength;
- u32 encode_parity_reg0;
+ u8 ecc_mode_shift;
+ u32 parity_bits;
int pg_irq_sel;
};
@@ -89,6 +83,46 @@ static const u8 ecc_strength_mt2712[] = {
40, 44, 48, 52, 56, 60, 68, 72, 80
};
+static const u8 ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12, 14, 16
+};
+
+enum mtk_ecc_regs {
+ ECC_ENCPAR00,
+ ECC_ENCIRQ_EN,
+ ECC_ENCIRQ_STA,
+ ECC_DECDONE,
+ ECC_DECIRQ_EN,
+ ECC_DECIRQ_STA,
+};
+
+static u32 mt2701_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static u32 mt2712_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x300,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static u32 mt7622_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x30,
+ [ECC_ENCIRQ_STA] = 0x34,
+ [ECC_DECDONE] = 0x11c,
+ [ECC_DECIRQ_EN] = 0x140,
+ [ECC_DECIRQ_STA] = 0x144,
+};
+
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
enum mtk_ecc_operation op)
{
@@ -107,32 +141,30 @@ static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
struct mtk_ecc *ecc = id;
- enum mtk_ecc_operation op;
u32 dec, enc;
- dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
+ & ECC_IRQ_EN;
if (dec) {
- op = ECC_DECODE;
- dec = readw(ecc->regs + ECC_DECDONE);
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
ecc->sectors = 0;
complete(&ecc->done);
} else {
return IRQ_HANDLED;
}
} else {
- enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
- if (enc) {
- op = ECC_ENCODE;
+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
+ & ECC_IRQ_EN;
+ if (enc)
complete(&ecc->done);
- } else {
+ else
return IRQ_NONE;
- }
}
return IRQ_HANDLED;
@@ -160,7 +192,7 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
/* configure ECC encoder (in bits) */
enc_sz = config->len << 3;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (enc_sz << ECC_MS_SHIFT);
writel(reg, ecc->regs + ECC_ENCCNFG);
@@ -171,9 +203,9 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
} else {
/* configure ECC decoder (in bits) */
dec_sz = (config->len << 3) +
- config->strength * ECC_PARITY_BITS;
+ config->strength * ecc->caps->parity_bits;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
reg |= DEC_EMPTY_EN;
writel(reg, ecc->regs + ECC_DECCNFG);
@@ -291,7 +323,12 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
*/
if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
reg_val |= ECC_PG_IRQ_SEL;
- writew(reg_val, ecc->regs + ECC_IRQ_REG(op));
+ if (op == ECC_ENCODE)
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ else
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
}
writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
@@ -310,13 +347,17 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
- if (op == ECC_DECODE)
+ if (op == ECC_DECODE) {
/*
* Clear decode IRQ status in case there is a timeout to wait
* decode IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
- writew(0, ecc->regs + ECC_IRQ_REG(op));
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ } else {
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ }
+
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
mutex_unlock(&ecc->lock);
@@ -367,11 +408,11 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
mtk_ecc_wait_idle(ecc, ECC_ENCODE);
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
- len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
/* write the parity bytes generated by the ECC back to temp buffer */
__ioread32_copy(ecc->eccdata,
- ecc->regs + ecc->caps->encode_parity_reg0,
+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
round_up(len, 4));
/* copy into possibly unaligned OOB region with actual length */
@@ -404,22 +445,42 @@ void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
+{
+ return ecc->caps->parity_bits;
+}
+EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
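With the parity width now exposed per SoC, callers can derive the per-sector
ECC byte count without hardcoding the old 14-bit ECC_PARITY_BITS constant. A
minimal sketch of the intended use, mirroring the mtk_nand.c hunks below
(strength stands for the configured ECC strength):

	/* ECC bytes per sector, from the SoC-specific parity width. */
	u32 ecc_bytes = DIV_ROUND_UP(strength *
				     mtk_ecc_get_parity_bits(nfc->ecc), 8);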
+
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
.ecc_strength = ecc_strength_mt2701,
+ .ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
- .encode_parity_reg0 = 0x10,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 0,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
.ecc_strength = ecc_strength_mt2712,
+ .ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
- .encode_parity_reg0 = 0x300,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 1,
};
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
+ .err_mask = 0x3f,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_regs = mt7622_ecc_regs,
+ .num_ecc_strength = 7,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .pg_irq_sel = 0,
+};
+
static const struct of_device_id mtk_ecc_dt_match[] = {
{
.compatible = "mediatek,mt2701-ecc",
@@ -427,6 +488,9 @@ static const struct of_device_id mtk_ecc_dt_match[] = {
}, {
.compatible = "mediatek,mt2712-ecc",
.data = &mtk_ecc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-ecc",
+ .data = &mtk_ecc_caps_mt7622,
},
{},
};
@@ -452,7 +516,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
max_eccdata_size = ecc->caps->num_ecc_strength - 1;
max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
- max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3;
+ max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
max_eccdata_size = round_up(max_eccdata_size, 4);
ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
if (!ecc->eccdata)
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
index d245c14f1b80..a455df080952 100644
--- a/drivers/mtd/nand/mtk_ecc.h
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -14,8 +14,6 @@
#include <linux/types.h>
-#define ECC_PARITY_BITS (14)
-
enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
@@ -43,6 +41,7 @@ int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
void mtk_ecc_disable(struct mtk_ecc *);
void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
void mtk_ecc_release(struct mtk_ecc *);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index d86a7d131cc0..6977da3a26aa 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -97,7 +97,6 @@
#define MTK_TIMEOUT (500000)
#define MTK_RESET_TIMEOUT (1000000)
-#define MTK_MAX_SECTOR (16)
#define MTK_NAND_MAX_NSELS (2)
#define MTK_NFC_MIN_SPARE (16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
@@ -109,6 +108,8 @@ struct mtk_nfc_caps {
u8 num_spare_size;
u8 pageformat_spare_shift;
u8 nfi_clk_div;
+ u8 max_sector;
+ u32 max_sector_size;
};
struct mtk_nfc_bad_mark_ctl {
@@ -173,6 +174,10 @@ static const u8 spare_size_mt2712[] = {
74
};
+static const u8 spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
return container_of(nand, struct mtk_nfc_nand_chip, nand);
@@ -450,7 +455,7 @@ static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
* set to max sector to allow the HW to continue reading over
* unaligned accesses
*/
- reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
nfi_writel(nfc, reg, NFI_CON);
/* trigger to fetch data */
@@ -481,7 +486,7 @@ static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
nfi_writew(nfc, reg, NFI_CNFG);
- reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
@@ -761,6 +766,8 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u32 reg;
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (!raw) {
/* OOB => FDM: from register, ECC: from HW */
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
@@ -794,7 +801,10 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (!raw)
mtk_ecc_disable(nfc->ecc);
- return ret;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
@@ -832,18 +842,7 @@ static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- int ret;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
- if (ret < 0)
- return -EIO;
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- ret = chip->waitfunc(mtd, chip);
-
- return ret & NAND_STATUS_FAIL ? -EIO : 0;
+ return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
@@ -892,8 +891,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
buf = bufpoi + start * chip->ecc.size;
- if (column != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+ nand_read_page_op(chip, page, column, NULL, 0);
addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
rc = dma_mapping_error(nfc->dev, addr);
@@ -1016,8 +1014,6 @@ static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1126,9 +1122,11 @@ static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 ecc_bytes;
- ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
+ mtk_ecc_get_parity_bits(nfc->ecc), 8);
fdm->reg_size = chip->spare_per_sector - ecc_bytes;
if (fdm->reg_size > NFI_FDM_MAX_SIZE)
@@ -1208,7 +1206,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
* this controller only supports 512 and 1024 sizes
*/
if (nand->ecc.size < 1024) {
- if (mtd->writesize > 512) {
+ if (mtd->writesize > 512 &&
+ nfc->caps->max_sector_size > 512) {
nand->ecc.size = 1024;
nand->ecc.strength <<= 1;
} else {
@@ -1223,7 +1222,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
return ret;
/* calculate oob bytes except ecc parity data */
- free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+ free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ + 7) >> 3;
free = spare - free;
/*
@@ -1233,10 +1233,12 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
*/
if (free > NFI_FDM_MAX_SIZE) {
spare -= NFI_FDM_MAX_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
} else if (free < 0) {
spare -= NFI_FDM_MIN_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
}
}
@@ -1389,6 +1391,8 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
.num_spare_size = 16,
.pageformat_spare_shift = 4,
.nfi_clk_div = 1,
+ .max_sector = 16,
+ .max_sector_size = 1024,
};
static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
@@ -1396,6 +1400,17 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
.num_spare_size = 19,
.pageformat_spare_shift = 16,
.nfi_clk_div = 2,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
+ .spare_size = spare_size_mt7622,
+ .num_spare_size = 4,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 8,
+ .max_sector_size = 512,
};
static const struct of_device_id mtk_nfc_id_table[] = {
@@ -1405,6 +1420,9 @@ static const struct of_device_id mtk_nfc_id_table[] = {
}, {
.compatible = "mediatek,mt2712-nfc",
.data = &mtk_nfc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-nfc",
+ .data = &mtk_nfc_caps_mt7622,
},
{}
};
@@ -1540,7 +1558,6 @@ static int mtk_nfc_resume(struct device *dev)
struct mtk_nfc *nfc = dev_get_drvdata(dev);
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
- struct mtd_info *mtd;
int ret;
u32 i;
@@ -1553,11 +1570,8 @@ static int mtk_nfc_resume(struct device *dev)
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
nand = &chip->nand;
- mtd = nand_to_mtd(nand);
- for (i = 0; i < chip->nsels; i++) {
- nand->select_chip(mtd, i);
- nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- }
+ for (i = 0; i < chip->nsels; i++)
+ nand_reset(nand, i);
}
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 6135d007a068..e70ca16a5118 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -561,14 +561,19 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ u8 status;
+ int ret;
/* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
}
/**
@@ -667,16 +672,83 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
do {
- if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
};
/**
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
+ *
+ * Return 0 if the NAND chip is ready, a negative error otherwise.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ u8 status = 0;
+ int ret;
+
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ break;
+
+ if (status & NAND_STATUS_READY)
+ break;
+
+ /*
+ * Typical lowest execution time for a tR on most NANDs is 10us,
+ * use this as polling delay before doing something smarter (ie.
+ * deriving a delay from the timeout value, timeout_ms/ratio).
+ */
+ udelay(10);
+ } while (time_before(jiffies, timeout_ms));
+
+ /*
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction precedes a DATA_IN
+ * instruction.
+ */
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
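A controller that has no access to the R/B pin would typically map WAITRDY
instructions onto this helper from its ->exec_op() dispatch code. A hedged
sketch, where my_ctrl_exec_instr is a hypothetical per-instruction dispatcher
and not part of this patch:

	static int my_ctrl_exec_instr(struct nand_chip *chip,
				      const struct nand_op_instr *instr)
	{
		switch (instr->type) {
		case NAND_OP_WAITRDY_INSTR:
			/* No R/B line wired up: poll STATUS instead. */
			return nand_soft_waitrdy(chip,
						 instr->ctx.waitrdy.timeout_ms);
		/* CMD/ADDR/DATA handling elided for brevity. */
		default:
			return -ENOTSUPP;
		}
	}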
+
+/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
@@ -710,7 +782,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
- chip->cmd_ctrl(mtd, command, ctrl);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -738,6 +811,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@@ -802,8 +876,8 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
- ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+ if (chip->setup_data_interface)
+ ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
else
ndelay(500);
}
@@ -831,7 +905,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -866,6 +942,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
@@ -1014,7 +1091,15 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
}
mdelay(1);
@@ -1031,8 +1116,9 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- int status;
unsigned long timeo = 400;
+ u8 status;
+ int ret;
/*
* Apply this short delay always to ensure that we do wait tWB in any
@@ -1040,7 +1126,9 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
ndelay(100);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
@@ -1051,14 +1139,22 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
break;
}
cond_resched();
} while (time_before(jiffies, timeo));
}
- status = (int)chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return ret;
+
/* This can happen in case of timeout or buggy dev_ready */
WARN_ON(!(status & NAND_STATUS_READY));
return status;
@@ -1076,7 +1172,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- const struct nand_data_interface *conf;
int ret;
if (!chip->setup_data_interface)
@@ -1096,8 +1191,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
* timings to timing mode 0.
*/
- conf = nand_get_default_data_interface();
- ret = chip->setup_data_interface(mtd, chipnr, conf);
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -1122,7 +1217,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- if (!chip->setup_data_interface || !chip->data_interface)
+ if (!chip->setup_data_interface)
return 0;
/*
@@ -1143,7 +1238,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
goto err;
}
- ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
err:
return ret;
}
@@ -1183,21 +1278,19 @@ static int nand_init_data_interface(struct nand_chip *chip)
modes = GENMASK(chip->onfi_timing_mode_default, 0);
}
- chip->data_interface = kzalloc(sizeof(*chip->data_interface),
- GFP_KERNEL);
- if (!chip->data_interface)
- return -ENOMEM;
for (mode = fls(modes) - 1; mode >= 0; mode--) {
- ret = onfi_init_data_interface(chip, chip->data_interface,
- NAND_SDR_IFACE, mode);
+ ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
if (ret)
continue;
- /* Pass -1 to only */
+ /*
+ * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
+ * controller supports the requested timings.
+ */
ret = chip->setup_data_interface(mtd,
NAND_DATA_IFACE_CHECK_ONLY,
- chip->data_interface);
+ &chip->data_interface);
if (!ret) {
chip->onfi_timing_mode_default = mode;
break;
@@ -1207,21 +1300,1429 @@ static int nand_init_data_interface(struct nand_chip *chip)
return 0;
}
-static void nand_release_data_interface(struct nand_chip *chip)
+/**
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first or the first two bytes of the @addrs field depending
+ * on the NAND bus width and the page size.
+ *
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
+ */
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Make sure the offset stays within the page and its OOB area. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
+ }
+
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
+}
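A worked example of the encoding above, assuming a large-page device (2 KiB
page, 64-byte OOB) on an 8-bit bus:

	u8 addrs[2];
	int ncycles = nand_fill_column_cycles(chip, addrs, 0x421);
	/* Large page: ncycles == 2, addrs[0] == 0x21, addrs[1] == 0x04. */

On a small-page device the same call would emit a single cycle, after rebasing
any OOB offset to the start of the OOB area.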
+
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[1] = page;
+ addrs[2] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ if (mtd->writesize > 512)
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
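Column offsets make OOB accesses fall out naturally: an OOB read is simply a
page read whose column starts at mtd->writesize, which is exactly how
nand_read_oob_op() below is built. A minimal sketch:

	/* Read the whole OOB area of @page into chip->oob_poi. */
	ret = nand_read_page_op(chip, page, mtd->writesize,
				chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;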
+
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ int ret;
+ u8 status;
+
+ if (naddrs < 0)
+ return naddrs;
+
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
+
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!prog || ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
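Splitting the operation in two lets controller drivers slot their data
transfer (and hardware ECC computation) between the SEQIN and PAGEPROG phases,
as the mtk_nand.c conversion above does. A hedged sketch of that shape:

	/* SEQIN + address cycles, no payload yet. */
	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* ... transfer the payload (e.g. via DMA) while HW ECC runs ... */

	/* PAGEPROG, wait tPROG, then check the status byte. */
	return nand_prog_page_end_op(chip);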
+
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
{
- kfree(chip->data_interface);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->write_buf(mtd, buf, len);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ instrs[2].ctx.data.force_8bit = force_8bit;
+
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *id = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
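Typical use is device identification during chip discovery. A minimal sketch
(the buffer length is an arbitrary choice here):

	u8 id[8];

	/* READID with address cycle 0x00, then clock out the ID bytes. */
	ret = nand_readid_op(chip, 0, id, sizeof(id));
	if (ret)
		return ret;
	/* id[0] is the manufacturer ID, id[1] the device ID. */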
+
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (!status)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
+
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command to avoid reading only the status until a new read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
+
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_MSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *params = data;
+ int i, ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *params = data;
+ int i;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+ } else {
+ chip->read_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->write_byte(mtd, p[i]);
+ } else {
+ chip->write_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
+
+/**
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
+ *
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
+ */
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
+
+/**
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ * Conversely, if the function returns true (ie. instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at the same time.
+ * In this case, split the instruction that does not fit in a single
+ * controller-operation into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instructions must start (for address and data instructions).
+ */
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
+{
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
+
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
+
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
+ */
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
+{
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
+
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+ * The instr_offset value comes back updated to point to the
+ * position where we have to split the instruction (the start
+ * of the next subop chunk).
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
+
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
+
+ /*
+ * This can happen if all instructions of a pattern are optional.
+ * Still, if there's not at least one instruction handled by this
+ * pattern, this is not a match, and we should try the next one (if
+ * any).
+ */
+ if (!ninstrs)
+ return false;
+
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+ * there are no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
+ }
+
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
+
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ const struct nand_op_instr *instr;
+ char *prefix = " ";
+ unsigned int i;
+
+ pr_debug("executing subop:\n");
+
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
+
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
+ }
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
+
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required) and passing them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation), or an error returned by the matching
+ * pattern's ->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ int ret;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ const struct nand_op_parser_pattern *pattern;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &ctx))
+ continue;
+
+ nand_op_parser_trace(&ctx);
+
+ if (check_only)
+ break;
+
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (i == parser->npatterns) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
+
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
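A controller driver wires this up by describing the instruction sequences its
hardware can execute as patterns, then delegating from its ->exec_op() hook. A
hedged sketch, assuming the NAND_OP_PARSER_*() helper macros introduced
alongside ->exec_op(); the my_ctrl_* names are hypothetical, and
my_ctrl_exec_rw is sketched after the nand_subop accessors below:

	static int my_ctrl_exec_rw(struct nand_chip *chip,
				   const struct nand_subop *subop);

	/* One pattern: CMD, up to 5 ADDR cycles, CMD, WAITRDY, then up to
	 * 4096 bytes of DATA_IN; every element is marked optional. */
	static const struct nand_op_parser my_ctrl_parser = NAND_OP_PARSER(
		NAND_OP_PARSER_PATTERN(my_ctrl_exec_rw,
			NAND_OP_PARSER_PAT_CMD_ELEM(true),
			NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			NAND_OP_PARSER_PAT_CMD_ELEM(true),
			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)));

	static int my_ctrl_exec_op(struct nand_chip *chip,
				   const struct nand_operation *op,
				   bool check_only)
	{
		return nand_op_parser_exec_op(chip, &my_ctrl_parser, op,
					      check_only);
	}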
+
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
+
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
+
+static int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
+}
+
+/**
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
+ */
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
+
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr->naddrs field of an address instruction. This is wrong as
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
+
+/**
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data->buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
+ */
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
+
+/**
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data->len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
+ */
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off = 0, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
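Putting the accessors to work, the ->exec() hook referenced in the parser
sketch above might look like this (controller callbacks are left as comments;
only the accessor usage is the point):

	static int my_ctrl_exec_rw(struct nand_chip *chip,
				   const struct nand_subop *subop)
	{
		unsigned int i;

		for (i = 0; i < subop->ninstrs; i++) {
			const struct nand_op_instr *instr = &subop->instrs[i];
			int start, cnt;

			switch (instr->type) {
			case NAND_OP_ADDR_INSTR:
				start = nand_subop_get_addr_start_off(subop, i);
				cnt = nand_subop_get_num_addr_cyc(subop, i);
				/* Issue cnt cycles, starting at
				 * instr->ctx.addr.addrs[start]. */
				break;
			case NAND_OP_DATA_IN_INSTR:
				start = nand_subop_get_data_start_off(subop, i);
				cnt = nand_subop_get_data_len(subop, i);
				/* Read cnt bytes to
				 * instr->ctx.data.buf.in + start. */
				break;
			default:
				/* CMD and WAITRDY are never split. */
				break;
			}
		}

		return 0;
	}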
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
* @chipnr: Internal die id
*
- * Returns 0 for success or negative error code otherwise
+ * Save the timings data structure, then apply SDR timings mode 0 (see
+ * nand_reset_data_interface for details), do the reset operation, and
+ * restore the previous timings.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
int nand_reset(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_data_interface saved_data_intf = chip->data_interface;
int ret;
ret = nand_reset_data_interface(chip, chipnr);
@@ -1233,10 +2734,13 @@ int nand_reset(struct nand_chip *chip, int chipnr)
* interface settings, hence this weird ->select_chip() dance.
*/
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
chip->select_chip(mtd, chipnr);
+ chip->data_interface = saved_data_intf;
ret = nand_setup_data_interface(chip, chipnr);
chip->select_chip(mtd, -1);
if (ret)
@@ -1390,9 +2894,19 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
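With the conversion above, the raw accessor no longer depends on ->cmdfunc(): nand_read_page_op() issues the READ0 sequence and transfers the main area, and nand_read_data_op() continues the data-out cycles. A minimal sketch of an equivalent caller (foo_dump_page() is hypothetical):

static int foo_dump_page(struct nand_chip *chip, int page, u8 *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* READ0 + address cycles + data-out for the main area */
	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Keep clocking data out to fetch the spare area as well */
	return nand_read_data_op(chip, buf + mtd->writesize, mtd->oobsize,
				 false);
}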
@@ -1414,29 +2928,50 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->read_buf(mtd, buf, eccsize);
+ ret = nand_read_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->read_buf(mtd, oob, eccbytes);
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->read_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1456,8 +2991,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1521,15 +3056,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
data_col_addr = start_step * chip->ecc.size;
	/* Handle reads that do not start at a page-aligned column */
- if (data_col_addr != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
-
p = bufpoi + data_col_addr;
- chip->read_buf(mtd, p, datafrag_len);
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
/* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
- chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+ chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
/*
* The performance is faster if we position offsets according to
@@ -1543,8 +3077,11 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
gaps = 1;
if (gaps) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
} else {
/*
* Send the command to read the particular ECC bytes take care
@@ -1558,12 +3095,15 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
(busw - 1))
aligned_len++;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + aligned_pos, -1);
- chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
}
- ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
chip->oob_poi, index, eccfrag_len);
if (ret)
return ret;
@@ -1572,13 +3112,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
- stat = chip->ecc.correct(mtd, p,
- &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
- &chip->buffers->ecccode[i],
+ &chip->ecc.code_buf[i],
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
@@ -1611,16 +3151,27 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1674,14 +3225,18 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1692,7 +3247,11 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
@@ -1729,7 +3288,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int ret, i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1737,25 +3296,44 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
- chip->read_buf(mtd, oob, eccbytes);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
stat = chip->ecc.correct(mtd, p, oob, NULL);
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
@@ -1779,8 +3357,11 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->read_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
return max_bitflips;
}
@@ -1894,16 +3475,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
- bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+ bufpoi = use_bufpoi ? chip->data_buf : buf;
if (use_bufpoi && aligned)
pr_debug("%s: using read bounce buffer for buf@%p\n",
__func__, buf);
read_retry:
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-
/*
* Now read the page into the buffer. Absent an error,
* the read methods return max bitflips per ecc step.
@@ -1938,7 +3516,7 @@ read_retry:
/* Invalidate page cache */
chip->pagebuf = -1;
}
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
}
if (unlikely(oob)) {
@@ -1979,7 +3557,7 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
} else {
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagebuf_bitflips);
@@ -2027,33 +3605,6 @@ read_retry:
}
/**
- * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
- * Get hold of the chip and call nand_do_read.
- */
-static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_READING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_read_ops(mtd, from, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -2061,9 +3612,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
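A sketch of a partial spare-area read through the same helper; offsets are relative to the start of the OOB area, which the helper is expected to translate into absolute columns (or a READOOB sequence on small-page devices). foo_peek_oob() and the sizes are assumptions for illustration:

static int foo_peek_oob(struct nand_chip *chip, int page, u8 *buf)
{
	/* Read 16 spare bytes starting at OOB offset 4 */
	return nand_read_oob_op(chip, page, 4, buf, 16);
}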
@@ -2081,25 +3630,43 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size;
uint8_t *bufpoi = chip->oob_poi;
- int i, toread, sndrnd = 0, pos;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
for (i = 0; i < chip->ecc.steps; i++) {
if (sndrnd) {
+ int ret;
+
pos = eccsize + i * (eccsize + chunk);
if (mtd->writesize > 512)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
else
- chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
} else
sndrnd = 1;
toread = min_t(int, length, chunk);
- chip->read_buf(mtd, bufpoi, toread);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false);
+ if (ret)
+ return ret;
+
bufpoi += toread;
length -= toread;
}
- if (length > 0)
- chip->read_buf(mtd, bufpoi, length);
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -2113,18 +3680,8 @@ EXPORT_SYMBOL(nand_read_oob_syndrome);
*/
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- int status = 0;
- const uint8_t *buf = chip->oob_poi;
- int length = mtd->oobsize;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, buf, length);
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
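As the conversion above relies on, nand_prog_page_op() bundles SEQIN, the address cycles, the payload, PAGEPROG and the status check, returning -EIO when NAND_STATUS_FAIL is set, so callers no longer inspect the status word themselves. A sketch of a one-shot full-page program (foo_prog_page() is hypothetical):

static int foo_prog_page(struct nand_chip *chip, int page, const u8 *buf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* One-shot program of a full page starting at column 0 */
	return nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
}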
@@ -2140,7 +3697,7 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
{
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
- int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
const uint8_t *bufpoi = chip->oob_poi;
/*
@@ -2154,7 +3711,10 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
} else
pos = eccsize;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; i < steps; i++) {
if (sndcmd) {
if (mtd->writesize <= 512) {
@@ -2163,28 +3723,40 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
len = eccsize;
while (len > 0) {
int num = min_t(int, len, 4);
- chip->write_buf(mtd, (uint8_t *)&fill,
- num);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
len -= num;
}
} else {
pos = eccsize + i * (eccsize + chunk);
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
}
} else
sndcmd = 1;
len = min_t(int, length, chunk);
- chip->write_buf(mtd, bufpoi, len);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
bufpoi += len;
length -= len;
}
- if (length > 0)
- chip->write_buf(mtd, bufpoi, length);
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_oob_syndrome);
@@ -2199,6 +3771,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats stats;
@@ -2214,21 +3787,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start read outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
- (from >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -2256,6 +3814,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
readlen -= len;
if (!readlen)
break;
@@ -2281,7 +3841,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
@@ -2299,13 +3859,6 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->retlen = 0;
- /* Do not allow reads past end of device */
- if (ops->datbuf && (from + ops->len) > mtd->size) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
if (ops->mode != MTD_OPS_PLACE_OOB &&
ops->mode != MTD_OPS_AUTO_OOB &&
ops->mode != MTD_OPS_RAW)
@@ -2336,11 +3889,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
- return 0;
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
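Drivers that need to interleave extra data (OOB, padding, ECC bytes) between the address cycles and PAGEPROG use the begin/end split instead of the one-shot helper, exactly as nand_write_page_raw() does above. A sketch with assumed buffers:

static int foo_prog_page_split(struct nand_chip *chip, int page,
			       const u8 *buf, const u8 *oob)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/* SEQIN + address cycles + main data, PAGEPROG not sent yet */
	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Additional data cycles can still be appended here */
	ret = nand_write_data_op(chip, oob, mtd->oobsize, false);
	if (ret)
		return ret;

	/* PAGEPROG + status polling */
	return nand_prog_page_end_op(chip);
}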
@@ -2362,31 +3924,52 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->write_buf(mtd, buf, eccsize);
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->write_buf(mtd, oob, eccbytes);
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->write_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -2403,7 +3986,7 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
/* Software ECC calculation */
@@ -2433,12 +4016,20 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
@@ -2447,9 +4038,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
return ret;
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2469,7 +4062,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_required, int page)
{
uint8_t *oob_buf = chip->oob_poi;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -2478,12 +4071,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_bytes = mtd->oobsize / ecc_steps;
int step, ret;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
- chip->write_buf(mtd, buf, ecc_size);
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
/* mask ECC of un-touched subpages by padding 0xFF */
if ((step < start_step) || (step > end_step))
@@ -2503,16 +4102,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
	/* Copy the calculated ECC for the whole page to chip->oob_poi; */
	/* this includes the masked value (0xFF) for unwritten subpages. */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2537,33 +4138,55 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
const uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ int ret;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.calculate(mtd, p, oob);
- chip->write_buf(mtd, oob, eccbytes);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->write_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -2589,9 +4212,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
else
subpage = 0;
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
oob_required, page);
@@ -2605,14 +4225,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (status < 0)
return status;
- if (nand_standard_page_accessors(&chip->ecc)) {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- }
-
return 0;
}
@@ -2737,9 +4349,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- memcpy(&chip->buffers->databuf[column], buf, bytes);
- wbuf = chip->buffers->databuf;
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ memcpy(&chip->data_buf[column], buf, bytes);
+ wbuf = chip->data_buf;
}
if (unlikely(oob)) {
@@ -2822,33 +4434,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC.
- */
-static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_WRITING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = (uint8_t *)buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
* @mtd: MTD device structure
* @to: offset to write to
@@ -2874,22 +4459,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start write outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow write past end of device */
- if (unlikely(to >= mtd->size ||
- ops->ooboffs + ops->ooblen >
- ((mtd->size >> chip->page_shift) -
- (to >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(to >> chip->chip_shift);
/*
@@ -2945,13 +4514,6 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
- /* Do not allow writes past end of device */
- if (ops->datbuf && (to + ops->len) > mtd->size) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
@@ -2984,11 +4546,12 @@ out:
static int single_erase(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eraseblock;
+
/* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
- return chip->waitfunc(mtd, chip);
+ return nand_erase_op(chip, eraseblock);
}
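nand_erase_op() takes an eraseblock index rather than a page number, hence the shift above. Worked example with assumed geometry: 2 KiB pages (page_shift = 11) in 128 KiB blocks (phys_erase_shift = 17) give 1 << (17 - 11) = 64 pages per block, so page 320 maps to eraseblock 320 >> 6 = 5.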
/**
@@ -3072,7 +4635,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
status = chip->erase(mtd, page & chip->pagemask);
/* See if block erase succeeded */
- if (status & NAND_STATUS_FAIL) {
+ if (status) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
instr->state = MTD_ERASE_FAILED;
@@ -3215,22 +4778,12 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int status;
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, subfeature_param[i]);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- return 0;
+ return nand_set_features_op(chip, addr, subfeature_param);
}
/**
@@ -3243,17 +4796,12 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->read_byte(mtd);
- return 0;
+ return nand_get_features_op(chip, addr, subfeature_param);
}
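A sketch of how these wrappers are typically exercised on an ONFI chip that advertises SET/GET FEATURES; feature address 0x01 (ONFI_FEATURE_ADDR_TIMING_MODE) selects the timing mode. foo_set_timing_mode() is hypothetical:

static int foo_set_timing_mode(struct mtd_info *mtd, struct nand_chip *chip,
			       int mode)
{
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };
	int ret;

	ret = chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				      param);
	if (ret)
		return ret;

	/* Read the feature back to confirm the device accepted the mode */
	return chip->onfi_get_features(mtd, chip,
				       ONFI_FEATURE_ADDR_TIMING_MODE, param);
}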
/**
@@ -3319,7 +4867,7 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->chip_delay = 20;
	/* Check if a user-supplied command function was given */
- if (chip->cmdfunc == NULL)
+ if (!chip->cmdfunc && !chip->exec_op)
chip->cmdfunc = nand_command;
	/* Check if a user-supplied wait function was given */
@@ -3396,12 +4944,11 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
struct onfi_ext_param_page *ep;
struct onfi_ext_section *s;
struct onfi_ext_ecc_info *ecc;
uint8_t *cursor;
- int ret = -EINVAL;
+ int ret;
int len;
int i;
@@ -3411,14 +4958,18 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
return -ENOMEM;
/* Send our own NAND_CMD_PARAM. */
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ goto ext_out;
/* Use the Change Read Column command to skip the ONFI param pages. */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- sizeof(*p) * p->num_of_param_pages , -1);
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
- /* Read out the Extended Parameter Page. */
- chip->read_buf(mtd, (uint8_t *)ep, len);
+ ret = -EINVAL;
if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
!= le16_to_cpu(ep->crc))) {
pr_debug("fail in the CRC.\n");
@@ -3471,19 +5022,23 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_onfi_params *p = &chip->onfi_params;
- int i, j;
- int val;
+ char id[4];
+ int i, ret, val;
/* Try ONFI for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
- if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
- chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
+
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
break;
@@ -3574,20 +5129,22 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_jedec_params *p = &chip->jedec_params;
struct jedec_ecc_info *ecc;
- int val;
- int i, j;
+ char id[5];
+ int i, val, ret;
/* Try JEDEC for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'C')
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
le16_to_cpu(p->crc))
@@ -3866,8 +5423,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer *manufacturer;
struct mtd_info *mtd = nand_to_mtd(chip);
- int busw;
- int i;
+ int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
@@ -3875,17 +5431,21 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- nand_reset(chip, 0);
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
/* Select the device */
chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
/* Read manufacturer and device IDs */
- maf_id = chip->read_byte(mtd);
- dev_id = chip->read_byte(mtd);
+ maf_id = id_data[0];
+ dev_id = id_data[1];
/*
* Try again to make sure, as some systems the bus-hold or other
@@ -3894,11 +5454,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* not match, ignore the device completely.
*/
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
/* Read entire ID string */
- for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
- id_data[i] = chip->read_byte(mtd);
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
if (id_data[0] != maf_id || id_data[1] != dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
@@ -4190,6 +5749,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
+ /* Enforce the right timings for reset/detection */
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+
ret = nand_dt_init(chip);
if (ret)
return ret;
@@ -4197,15 +5759,21 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ /*
+ * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
+ * populated.
+ */
+ if (!chip->exec_op) {
/*
- * Default functions assigned for chip_select() and
- * cmdfunc() both expect cmd_ctrl() to be populated,
- * so we need to check that that's the case
+ * Default functions assigned for ->cmdfunc() and
+ * ->select_chip() both expect ->cmd_ctrl() to be populated.
*/
- pr_err("chip.cmd_ctrl() callback is not provided");
- return -EINVAL;
+ if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ pr_err("->cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
}
+
/* Set the default functions */
nand_set_defaults(chip);
@@ -4225,15 +5793,16 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
chip->select_chip(mtd, i);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
- if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd)) {
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
chip->select_chip(mtd, -1);
break;
}
@@ -4600,26 +6169,6 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
-static bool invalid_ecc_page_accessors(struct nand_chip *chip)
-{
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (nand_standard_page_accessors(ecc))
- return false;
-
- /*
- * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
- * controller driver implements all the page accessors because
- * default helpers are not suitable when the core does not
- * send the READ0/PAGEPROG commands.
- */
- return (!ecc->read_page || !ecc->write_page ||
- !ecc->read_page_raw || !ecc->write_page_raw ||
- (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
- (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
- ecc->hwctl && ecc->calculate));
-}
-
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -4632,7 +6181,6 @@ int nand_scan_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_buffers *nbuf = NULL;
int ret, i;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4641,39 +6189,9 @@ int nand_scan_tail(struct mtd_info *mtd)
return -EINVAL;
}
- if (invalid_ecc_page_accessors(chip)) {
- pr_err("Invalid ECC page accessors setup\n");
- return -EINVAL;
- }
-
- if (!(chip->options & NAND_OWN_BUFFERS)) {
- nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
- if (!nbuf)
- return -ENOMEM;
-
- nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccalc) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccode) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!nbuf->databuf) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- chip->buffers = nbuf;
- } else if (!chip->buffers) {
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
return -ENOMEM;
- }
/*
* FIXME: some NAND manufacturer drivers expect the first die to be
@@ -4685,10 +6203,10 @@ int nand_scan_tail(struct mtd_info *mtd)
ret = nand_manufacturer_init(chip);
chip->select_chip(mtd, -1);
if (ret)
- goto err_free_nbuf;
+ goto err_free_buf;
/* Set the internal oob buffer location, just after the page data */
- chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+ chip->oob_poi = chip->data_buf + mtd->writesize;
/*
* If no default placement scheme is given, select an appropriate one.
@@ -4836,6 +6354,15 @@ int nand_scan_tail(struct mtd_info *mtd)
goto err_nand_manuf_cleanup;
}
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
/* For many systems, the standard OOB write also works for raw */
if (!ecc->read_oob_raw)
ecc->read_oob_raw = ecc->read_oob;
@@ -4917,8 +6444,6 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = nand_read;
- mtd->_write = nand_write;
mtd->_panic_write = panic_nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
@@ -4954,7 +6479,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->select_chip(mtd, -1);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
}
/* Check, if we should skip the bad block table scan */
@@ -4964,23 +6489,18 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Build bad block table */
ret = chip->scan_bbt(mtd);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
return 0;
-err_nand_data_iface_cleanup:
- nand_release_data_interface(chip);
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
-err_free_nbuf:
- if (nbuf) {
- kfree(nbuf->databuf);
- kfree(nbuf->ecccode);
- kfree(nbuf->ecccalc);
- kfree(nbuf);
- }
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
return ret;
}
@@ -5028,16 +6548,11 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
- nand_release_data_interface(chip);
-
/* Free bad block table memory */
kfree(chip->bbt);
- if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
- kfree(chip->buffers->databuf);
- kfree(chip->buffers->ecccode);
- kfree(chip->buffers->ecccalc);
- kfree(chip->buffers);
- }
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
/* Free bad block descriptor memory */
if (chip->badblock_pattern && chip->badblock_pattern->options
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2915b6739bf8..36092850be2c 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -898,7 +898,7 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
{
struct nand_chip *this = mtd_to_nand(mtd);
- return create_bbt(mtd, this->buffers->databuf, bd, -1);
+ return create_bbt(mtd, this->data_buf, bd, -1);
}
/**
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
index 985751eda317..d542908a0ebb 100644
--- a/drivers/mtd/nand/nand_hynix.c
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -67,15 +67,43 @@ struct hynix_read_retry_otp {
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, cmd, -1, -1);
+
+ return 0;
+}
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
struct mtd_info *mtd = nand_to_mtd(chip);
- u8 jedecid[6] = { };
- int i = 0;
+ u16 column = ((u16)addr << 8) | addr;
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- for (i = 0; i < 5; i++)
- jedecid[i] = chip->read_byte(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
+ chip->write_byte(mtd, val);
- return !strcmp("JEDEC", jedecid);
+ return 0;
}
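The column encoding above replicates the register address on both column-address cycles, which is what the Hynix 'Set Parameters' sequence expects: for an assumed register address of 0xCC, column = (0xCC << 8) | 0xCC = 0xCCCC.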
static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
@@ -83,14 +111,15 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
struct nand_chip *chip = mtd_to_nand(mtd);
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
const u8 *values;
- int status;
- int i;
+ int i, ret;
values = hynix->read_retry->values +
(retry_mode * hynix->read_retry->nregs);
/* Enter 'Set Hynix Parameters' mode */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
/*
* Configure the NAND in the requested read-retry mode.
@@ -102,21 +131,14 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
* probably tweaked at production in this case).
*/
for (i = 0; i < hynix->read_retry->nregs; i++) {
- int column = hynix->read_retry->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, values[i]);
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
}
/* Apply the new settings. */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
/**
@@ -172,40 +194,63 @@ static int hynix_read_rr_otp(struct nand_chip *chip,
const struct hynix_read_retry_otp *info,
void *buf)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i;
+ int i, ret;
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
for (i = 0; i < info->nregs; i++) {
- int column = info->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, info->values[i]);
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
}
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
/* Sequence to enter OTP mode? */
- chip->cmdfunc(mtd, 0x17, -1, -1);
- chip->cmdfunc(mtd, 0x04, -1, -1);
- chip->cmdfunc(mtd, 0x19, -1, -1);
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
/* Now read the page */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page);
- chip->read_buf(mtd, buf, info->size);
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
/* Put everything back to normal */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1);
- chip->write_byte(mtd, 0x0);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- return 0;
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
}
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
index abf6a3c376e8..02e109ae73f1 100644
--- a/drivers/mtd/nand/nand_micron.c
+++ b/drivers/mtd/nand/nand_micron.c
@@ -117,16 +117,28 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required,
int page)
{
- int status;
- int max_bitflips = 0;
+ u8 status;
+ int ret, max_bitflips = 0;
- micron_nand_on_die_ecc_setup(chip, true);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = chip->read_byte(mtd);
if (status & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
+
/*
* The internal ECC doesn't tell us the number of bitflips
* that have been corrected, but tells us if it recommends to
@@ -137,13 +149,15 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
else if (status & NAND_STATUS_WRITE_RECOMMENDED)
max_bitflips = chip->ecc.strength;
- chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
-
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+out:
micron_nand_on_die_ecc_setup(chip, false);
- return max_bitflips;
+ return ret ? ret : max_bitflips;
}
static int
@@ -151,46 +165,16 @@ micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- int status;
-
- micron_nand_on_die_ecc_setup(chip, true);
+ int ret;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+ ret = nand_write_page_raw(mtd, chip, buf, oob_required, page);
micron_nand_on_die_ecc_setup(chip, false);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int
-micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int oob_required,
- int page)
-{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
-
- return 0;
-}
-
-static int
-micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return ret;
}
enum {
@@ -285,17 +269,14 @@ static int micron_nand_init(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.bytes = 8;
chip->ecc.size = 512;
chip->ecc.strength = 4;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
- chip->ecc.read_page_raw =
- micron_nand_read_page_raw_on_die_ecc;
- chip->ecc.write_page_raw =
- micron_nand_write_page_raw_on_die_ecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
}
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index d348f0129ae7..ef022f62f74c 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -91,6 +91,25 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
}
} else {
nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ chip->ecc_step_ds = 512;
+ chip->ecc_strength_ds = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
}
}
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 5d1533bcc5bd..9400d039ddbd 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -283,16 +283,16 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
/**
- * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
* given ONFI mode
- * @iface: The data interface to be initialized
* @mode: The ONFI timing mode
*/
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode)
{
+ struct nand_data_interface *iface = &chip->data_interface;
+
if (type != NAND_SDR_IFACE)
return -EINVAL;
@@ -321,15 +321,4 @@ int onfi_init_data_interface(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL(onfi_init_data_interface);
-
-/**
- * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
- * data interface for mode 0. This is used as default timing after
- * reset.
- */
-const struct nand_data_interface *nand_get_default_data_interface(void)
-{
- return &onfi_sdr_timings[0];
-}
-EXPORT_SYMBOL(nand_get_default_data_interface);
+EXPORT_SYMBOL(onfi_fill_data_interface);
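A sketch of the intended call site: before detection, the core now fills chip->data_interface in place instead of fetching a shared default structure, mirroring the nand_scan_ident() change above. foo_prepare_detection() is hypothetical:

static int foo_prepare_detection(struct nand_chip *chip)
{
	/* Default to ONFI SDR timing mode 0 before reset/detection */
	return onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
}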
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index dad438c4906a..8cdf7d3d8fa7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1530,7 +1530,9 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
int ret;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1548,7 +1550,8 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
/* Write ecc vector to OOB area */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1568,7 +1571,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
- u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -1582,6 +1585,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
* ECC is calculated for all subpages but we choose
* only what we want.
*/
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1605,7 +1609,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
	/* Copy the calculated ECC for the whole page to chip->oob_poi; */
	/* this includes the masked value (0xFF) for unwritten subpages. */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
@@ -1614,7 +1618,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1635,11 +1639,13 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int stat, ret;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1647,10 +1653,10 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, buf, mtd->writesize);
/* Read oob bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
- chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
- chip->ecc.total);
+ nand_change_read_column_op(chip,
+ mtd->writesize + BADBLOCK_MARKER_LENGTH,
+ chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+ chip->ecc.total, false);
/* Calculate ecc bytes */
omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 9285f60e5783..d1979c7dbe7e 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -520,15 +520,13 @@ static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
struct nand_chip *chip = &host->chip;
struct pxa3xx_nand_info *info = host->info_data;
const struct pxa3xx_nand_flash *f = NULL;
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
int i, id, ntypes;
+ u8 idbuf[2];
ntypes = ARRAY_SIZE(builtin_flash_types);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
- id = chip->read_byte(mtd);
- id |= chip->read_byte(mtd) << 0x8;
+ nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
+ id = idbuf[0] | (idbuf[1] << 8);
for (i = 0; i < ntypes; i++) {
f = &builtin_flash_types[i];
@@ -1351,10 +1349,10 @@ static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -1364,7 +1362,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
struct pxa3xx_nand_info *info = host->info_data;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (info->retcode == ERR_CORERR && info->use_ecc) {
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 2656c1ac5646..563b759ffca6 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -1725,6 +1726,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf = NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -1750,6 +1752,7 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
int i, ret;
int read_loc;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = chip->oob_poi;
@@ -1850,6 +1853,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1902,6 +1907,9 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1916,6 +1924,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1970,6 +1979,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1990,7 +2002,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *oob = chip->oob_poi;
int data_size, oob_size;
- int ret, status = 0;
+ int ret;
host->use_ecc = true;
@@ -2027,11 +2039,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -2081,7 +2089,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int page, ret, status = 0;
+ int page, ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2114,11 +2122,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2636,6 +2640,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
nand_set_flash_node(chip, dn);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
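Note how the qcom driver splits the two helpers around its descriptor machinery: the begin op is issued with a NULL buffer so that only the command and address cycles go out, the controller then DMAs the page data, and the end op is sent only if descriptor execution succeeded. A hedged usage sketch of that pattern, where example_dma_program() is a hypothetical stand-in for the driver's real descriptor path:

/* Hypothetical driver, sketching the begin/end split used by qcom_nandc
 * above; example_dma_program() stands in for the real descriptor code.
 */
static int example_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			      const u8 *buf, int oob_required, int page)
{
	int ret;

	/* SEQIN + address cycles only; the controller moves the data */
	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	ret = example_dma_program(chip, buf);
	if (!ret)
		ret = nand_prog_page_end_op(chip);	/* PAGEPROG + status */

	return ret;
}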
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index fc9287af4614..595635b9e9de 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -364,7 +364,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct r852_device *dev = nand_get_controller_data(chip);
unsigned long timeout;
- int status;
+ u8 status;
timeout = jiffies + (chip->state == FL_ERASING ?
msecs_to_jiffies(400) : msecs_to_jiffies(20));
@@ -373,8 +373,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = (int)chip->read_byte(mtd);
+ nand_status_op(chip, &status);
/* Unfortunately, no way to send detailed error status... */
if (dev->dma_error) {
@@ -522,9 +521,7 @@ exit:
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -1046,7 +1043,7 @@ static int r852_resume(struct device *device)
if (dev->card_registred) {
r852_engine_enable(dev);
dev->chip->select_chip(mtd, 0);
- dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(dev->chip);
dev->chip->select_chip(mtd, -1);
}
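nand_status_op() used above condenses the usual two-step status poll; a sketch of the legacy equivalent it replaces, as the removed r852 lines show:

/* Sketch: the sequence nand_status_op(chip, &status) stands in for. */
static u8 legacy_read_status(struct mtd_info *mtd, struct nand_chip *chip)
{
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	return chip->read_byte(mtd);
}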
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 3c5008a4f5f3..c4e7755448e6 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -614,7 +614,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
@@ -624,9 +624,9 @@ static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
index d3e028e58b0f..1581671b05ae 100644
--- a/drivers/mtd/nand/sm_common.h
+++ b/drivers/mtd/nand/sm_common.h
@@ -36,7 +36,7 @@ struct sm_oob {
#define SM_SMALL_OOB_SIZE 8
-extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+int sm_register_device(struct mtd_info *mtd, int smartmedia);
static inline int sm_sector_valid(struct sm_oob *oob)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 82244be3e766..f5a55c63935c 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -958,12 +958,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
int ret;
if (*cur_off != data_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+ nand_change_read_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -991,16 +991,15 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
* Re-read the data with the randomizer disabled to identify
* bitflips in erased pages.
*/
- if (nand->options & NAND_NEED_SCRAMBLING) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- } else {
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand_change_read_column_op(nand, data_off, data,
+ ecc->size, false);
+ else
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
ecc->size);
- }
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
+ false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1011,7 +1010,8 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
if (oob_required) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0,
+ false);
sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
true, page);
@@ -1038,8 +1038,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- offset + mtd->writesize, -1);
+ nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
+ false);
if (!randomize)
sunxi_nfc_read_buf(mtd, oob + offset, len);
@@ -1116,9 +1116,9 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (oob_required && !erased) {
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
!i, page);
@@ -1143,18 +1143,17 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
/*
* Re-read the data with the randomizer disabled to
* identify bitflips in erased pages.
+ * TODO: use DMA to read page in raw mode
*/
- if (randomized) {
- /* TODO: use DMA to read page in raw mode */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- }
+ if (randomized)
+ nand_change_read_column_op(nand, data_off,
+ data, ecc->size,
+ false);
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1187,12 +1186,12 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
int ret;
if (data_off != *cur_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
+ nand_change_write_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
+ nand_change_write_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -1228,8 +1227,8 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN,
- offset + mtd->writesize, -1);
+ nand_change_write_column_op(nand, offset + mtd->writesize,
+ NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
@@ -1246,6 +1245,8 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1279,14 +1280,14 @@ static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
{
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
chip->ecc.steps);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
}
@@ -1299,6 +1300,8 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1330,13 +1333,13 @@ static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
buf, page);
}
@@ -1349,6 +1352,8 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1370,7 +1375,7 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
@@ -1382,6 +1387,8 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1400,7 +1407,7 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
@@ -1430,6 +1437,8 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
sunxi_nfc_randomizer_config(mtd, page, false);
sunxi_nfc_randomizer_enable(mtd);
@@ -1460,7 +1469,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
NULL, page);
- return 0;
+ return nand_prog_page_end_op(chip);
pio_fallback:
return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
@@ -1476,6 +1485,8 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1512,6 +1523,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1533,41 +1546,33 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
chip->pagebuf = -1;
- return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, 1, page);
}
static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- int ret, status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ int ret;
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ ret = chip->ecc.write_page(mtd, chip, chip->data_buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1853,8 +1858,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
- if (ecc->strength <= strengths[i])
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
break;
+ }
}
if (i >= ARRAY_SIZE(strengths)) {
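The sunxi hunks above lean on the column-change helpers: nand_change_read_column_op() replaces a RNDOUT command (optionally followed by a read_buf()), and nand_change_write_column_op() does the same for RNDIN on the program path. A sketch of the read-side equivalence, reconstructed from the removed lines:

/* Sketch: legacy equivalent of nand_change_read_column_op(). A NULL
 * buffer with len 0 only moves the column; a real buffer also reads.
 */
static void legacy_change_read_column(struct mtd_info *mtd,
				      struct nand_chip *chip,
				      unsigned int column,
				      void *buf, unsigned int len)
{
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
	if (buf && len)
		chip->read_buf(mtd, buf, len);
}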
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 766906f03943..c5bee00b7f5e 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -329,7 +329,7 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+ nand_change_read_column_op(chip, *pos, NULL, 0, false);
} else {
tango_read_buf(mtd, *buf, len);
*buf += len;
@@ -344,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1);
+ nand_change_write_column_op(chip, *pos, NULL, 0, false);
} else {
tango_write_buf(mtd, *buf, len);
*buf += len;
@@ -427,7 +427,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, buf, chip->oob_poi);
return 0;
}
@@ -435,23 +435,15 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int oob_required, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, buf, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, NULL, chip->oob_poi);
return 0;
}
@@ -459,11 +451,9 @@ static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, NULL, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- chip->waitfunc(mtd, chip);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
@@ -590,7 +580,6 @@ static int chip_init(struct device *dev, struct device_node *np)
ecc->write_page = tango_write_page;
ecc->read_oob = tango_read_oob;
ecc->write_oob = tango_write_oob;
- ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
err = nand_scan_tail(mtd);
if (err)
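The read-side counterpart shows up across these hunks as nand_read_page_op(chip, page, 0, NULL, 0): with a NULL buffer it issues only READ0 plus the address cycles and leaves the data transfer to the controller, exactly as the removed tango line did with cmdfunc(). A sketch of that legacy equivalent:

/* Sketch: what nand_read_page_op(chip, page, 0, NULL, 0) replaces when
 * the controller fetches the data itself (PIO or DMA, not read_buf()).
 */
static void legacy_begin_page_read(struct mtd_info *mtd,
				   struct nand_chip *chip, int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
}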
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 84dbf32332e1..dcaa924502de 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -192,6 +192,7 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
{
struct tmio_nand *tmio = mtd_to_tmio(mtd);
long timeout;
+ u8 status;
/* enable RDYREQ interrupt */
tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
@@ -212,8 +213,8 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
}
- nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return nand_chip->read_byte(mtd);
+ nand_status_op(nand_chip, &status);
+ return status;
}
/*
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 8037d4b48a05..80d31a58e558 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -560,7 +560,7 @@ static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
int eccsize = chip->ecc.size;
int stat;
- vf610_nfc_read_buf(mtd, buf, eccsize);
+ nand_read_page_op(chip, page, 0, buf, eccsize);
if (oob_required)
vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -580,7 +580,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
{
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
- vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -588,7 +588,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc->use_hw_ecc = true;
nfc->write_sz = mtd->writesize + mtd->oobsize;
- return 0;
+ return nand_prog_page_end_op(chip);
}
static const struct of_device_id vf610_nfc_dt_ids[] = {
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index dcae2f6a2b11..9dc15748947b 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -4,8 +4,7 @@ menuconfig MTD_ONENAND
depends on HAS_IOMEM
help
This enables support for accessing all types of OneNAND flash
- devices. For further information see
- <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
+ devices.
if MTD_ONENAND
@@ -26,9 +25,11 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on OF || COMPILE_TEST
help
- Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
+ Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
via the GPMC memory controller.
+ Enable dmaengine and gpiolib for better performance.
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 24a1388d3031..87c34f607a75 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -28,19 +28,18 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/mach/flash.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
@@ -50,24 +49,17 @@ struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
- unsigned int mem_size;
- int gpio_irq;
+ struct gpio_desc *int_gpiod;
struct mtd_info mtd;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
- int dma_channel;
- int freq;
- int (*setup)(void __iomem *base, int *freq_ptr);
- struct regulator *regulator;
- u8 flags;
+ struct dma_chan *dma_chan;
};
-static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap2_onenand_dma_complete_func(void *completion)
{
- struct omap2_onenand *c = data;
-
- complete(&c->dma_done);
+ complete(completion);
}
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
@@ -90,6 +82,65 @@ static inline void write_reg(struct omap2_onenand *c, unsigned short value,
writew(value, c->onenand.base + reg);
}
+static int omap2_onenand_set_cfg(struct omap2_onenand *c,
+ bool sr, bool sw,
+ int latency, int burst_len)
+{
+ unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+ reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
+
+ switch (burst_len) {
+ case 0: /* continuous */
+ break;
+ case 4:
+ reg |= ONENAND_SYS_CFG1_BL_4;
+ break;
+ case 8:
+ reg |= ONENAND_SYS_CFG1_BL_8;
+ break;
+ case 16:
+ reg |= ONENAND_SYS_CFG1_BL_16;
+ break;
+ case 32:
+ reg |= ONENAND_SYS_CFG1_BL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (latency > 5)
+ reg |= ONENAND_SYS_CFG1_HF;
+ if (latency > 7)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ if (sr)
+ reg |= ONENAND_SYS_CFG1_SYNC_READ;
+ if (sw)
+ reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
+
+ write_reg(c, reg, ONENAND_REG_SYS_CFG1);
+
+ return 0;
+}
+
+static int omap2_onenand_get_freq(int ver)
+{
+ switch ((ver >> 4) & 0xf) {
+ case 0:
+ return 40;
+ case 1:
+ return 54;
+ case 2:
+ return 66;
+ case 3:
+ return 83;
+ case 4:
+ return 104;
+ }
+
+ return -EINVAL;
+}
+
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
@@ -153,28 +204,22 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
- if (c->flags & ONENAND_IN_OMAP34XX)
- /* Add a delay to let GPIO settle */
- syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ /* Add a delay to let GPIO settle */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
reinit_completion(&c->irq_done);
- if (c->gpio_irq) {
- result = gpio_get_value(c->gpio_irq);
- if (result == -1) {
- ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
- intr = read_reg(c, ONENAND_REG_INTERRUPT);
- wait_err("gpio error", state, ctrl, intr);
- return -EIO;
- }
- } else
- result = 0;
- if (result == 0) {
+ result = gpiod_get_value(c->int_gpiod);
+ if (result < 0) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return result;
+ } else if (result == 0) {
int retry_cnt = 0;
retry:
- result = wait_for_completion_timeout(&c->irq_done,
- msecs_to_jiffies(20));
- if (result == 0) {
+ if (!wait_for_completion_io_timeout(&c->irq_done,
+ msecs_to_jiffies(20))) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
@@ -291,9 +336,42 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
return 0;
}
-#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
+ dma_addr_t src, dma_addr_t dst,
+ size_t count)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ if (!tx) {
+ dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ reinit_completion(&c->dma_done);
+
+ tx->callback = omap2_onenand_dma_complete_func;
+ tx->callback_param = &c->dma_done;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(c->dma_chan);
+
+ if (!wait_for_completion_io_timeout(&c->dma_done,
+ msecs_to_jiffies(20))) {
+ dmaengine_terminate_sync(c->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
@@ -301,10 +379,9 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
size_t xtra;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -341,25 +418,10 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -371,7 +433,7 @@ out_copy:
return 0;
}
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
@@ -379,9 +441,8 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -412,25 +473,10 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
return -1;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -442,136 +488,6 @@ out_copy:
return 0;
}
-#else
-
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy(buffer, (__force void *)(this->base + bram_offset),
- count);
- return 0;
- }
-
- dma_src = c->phys_base + bram_offset;
- dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count / 4, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
-
- return 0;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy((__force void *)(this->base + bram_offset), buffer,
- count);
- return 0;
- }
-
- dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
- DMA_TO_DEVICE);
- dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_src)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
- count / 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
-
- return 0;
-}
-
-#else
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-static struct platform_driver omap2_onenand_driver;
-
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -583,168 +499,117 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
-static int omap2_onenand_enable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_enable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't enable regulator\n");
-
- return ret;
-}
-
-static int omap2_onenand_disable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_disable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't disable regulator\n");
-
- return ret;
-}
-
static int omap2_onenand_probe(struct platform_device *pdev)
{
- struct omap_onenand_platform_data *pdata;
- struct omap2_onenand *c;
- struct onenand_chip *this;
- int r;
+ u32 val;
+ dma_cap_mask_t mask;
+ int freq, latency, r;
struct resource *res;
+ struct omap2_onenand *c;
+ struct gpmc_onenand_info info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "error getting memory resource\n");
+ return -EINVAL;
+ }
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
+ r = of_property_read_u32(np, "reg", &val);
+ if (r) {
+ dev_err(dev, "reg not found in DT\n");
+ return r;
}
- c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+ c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
if (!c)
return -ENOMEM;
init_completion(&c->irq_done);
init_completion(&c->dma_done);
- c->flags = pdata->flags;
- c->gpmc_cs = pdata->cs;
- c->gpio_irq = pdata->gpio_irq;
- c->dma_channel = pdata->dma_channel;
- if (c->dma_channel < 0) {
- /* if -1, don't use DMA */
- c->gpio_irq = 0;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- r = -EINVAL;
- dev_err(&pdev->dev, "error getting memory resource\n");
- goto err_kfree;
- }
-
+ c->gpmc_cs = val;
c->phys_base = res->start;
- c->mem_size = resource_size(res);
-
- if (request_mem_region(c->phys_base, c->mem_size,
- pdev->dev.driver->name) == NULL) {
- dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
- c->phys_base, c->mem_size);
- r = -EBUSY;
- goto err_kfree;
- }
- c->onenand.base = ioremap(c->phys_base, c->mem_size);
- if (c->onenand.base == NULL) {
- r = -ENOMEM;
- goto err_release_mem_region;
- }
- if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, &c->freq);
- if (r < 0) {
- dev_err(&pdev->dev, "Onenand platform setup failed: "
- "%d\n", r);
- goto err_iounmap;
- }
- c->setup = pdata->onenand_setup;
+ c->onenand.base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(c->onenand.base))
+ return PTR_ERR(c->onenand.base);
+
+ c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
+ if (IS_ERR(c->int_gpiod)) {
+ r = PTR_ERR(c->int_gpiod);
+ /* Just try again if this happens */
+ if (r != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio: %d\n", r);
+ return r;
}
- if (c->gpio_irq) {
- if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
- dev_err(&pdev->dev, "Failed to request GPIO%d for "
- "OneNAND\n", c->gpio_irq);
- goto err_iounmap;
- }
- gpio_direction_input(c->gpio_irq);
+ if (c->int_gpiod) {
+ r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
+ omap2_onenand_interrupt,
+ IRQF_TRIGGER_RISING, "onenand", c);
+ if (r)
+ return r;
- if ((r = request_irq(gpio_to_irq(c->gpio_irq),
- omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
- pdev->dev.driver->name, c)) < 0)
- goto err_release_gpio;
+ c->onenand.wait = omap2_onenand_wait;
}
- if (c->dma_channel >= 0) {
- r = omap_request_dma(0, pdev->dev.driver->name,
- omap2_onenand_dma_cb, (void *) c,
- &c->dma_channel);
- if (r == 0) {
- omap_set_dma_write_mode(c->dma_channel,
- OMAP_DMA_WRITE_NON_POSTED);
- omap_set_dma_src_data_pack(c->dma_channel, 1);
- omap_set_dma_src_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- omap_set_dma_dest_data_pack(c->dma_channel, 1);
- omap_set_dma_dest_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- } else {
- dev_info(&pdev->dev,
- "failed to allocate DMA for OneNAND, "
- "using PIO instead\n");
- c->dma_channel = -1;
- }
- }
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
- dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
- c->onenand.base, c->freq);
+ c->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (c->dma_chan) {
+ c->onenand.read_bufferram = omap2_onenand_read_bufferram;
+ c->onenand.write_bufferram = omap2_onenand_write_bufferram;
+ }
c->pdev = pdev;
c->mtd.priv = &c->onenand;
+ c->mtd.dev.parent = dev;
+ mtd_set_of_node(&c->mtd, dev->of_node);
- c->mtd.dev.parent = &pdev->dev;
- mtd_set_of_node(&c->mtd, pdata->of_node);
-
- this = &c->onenand;
- if (c->dma_channel >= 0) {
- this->wait = omap2_onenand_wait;
- if (c->flags & ONENAND_IN_OMAP34XX) {
- this->read_bufferram = omap3_onenand_read_bufferram;
- this->write_bufferram = omap3_onenand_write_bufferram;
- } else {
- this->read_bufferram = omap2_onenand_read_bufferram;
- this->write_bufferram = omap2_onenand_write_bufferram;
- }
- }
+ dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
+ c->gpmc_cs, c->phys_base, c->onenand.base,
+ c->dma_chan ? "DMA" : "PIO");
- if (pdata->regulator_can_sleep) {
- c->regulator = regulator_get(&pdev->dev, "vonenand");
- if (IS_ERR(c->regulator)) {
- dev_err(&pdev->dev, "Failed to get regulator\n");
- r = PTR_ERR(c->regulator);
- goto err_release_dma;
+ if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ goto err_release_dma;
+
+ freq = omap2_onenand_get_freq(c->onenand.version_id);
+ if (freq > 0) {
+ switch (freq) {
+ case 104:
+ latency = 7;
+ break;
+ case 83:
+ latency = 6;
+ break;
+ case 66:
+ latency = 5;
+ break;
+ case 56:
+ latency = 4;
+ break;
+ default: /* 40 MHz or lower */
+ latency = 3;
+ break;
}
- c->onenand.enable = omap2_onenand_enable;
- c->onenand.disable = omap2_onenand_disable;
- }
- if (pdata->skip_initial_unlocking)
- this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
+ r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
+ freq, latency, &info);
+ if (r)
+ goto err_release_onenand;
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
- goto err_release_regulator;
+ r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
+ latency, info.burst_len);
+ if (r)
+ goto err_release_onenand;
- r = mtd_device_register(&c->mtd, pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ if (info.sync_read || info.sync_write)
+ dev_info(dev, "optimized timings for %d MHz\n", freq);
+ }
+
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
@@ -754,22 +619,9 @@ static int omap2_onenand_probe(struct platform_device *pdev)
err_release_onenand:
onenand_release(&c->mtd);
-err_release_regulator:
- regulator_put(c->regulator);
err_release_dma:
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
- if (c->gpio_irq)
- free_irq(gpio_to_irq(c->gpio_irq), c);
-err_release_gpio:
- if (c->gpio_irq)
- gpio_free(c->gpio_irq);
-err_iounmap:
- iounmap(c->onenand.base);
-err_release_mem_region:
- release_mem_region(c->phys_base, c->mem_size);
-err_kfree:
- kfree(c);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
return r;
}
@@ -779,27 +631,26 @@ static int omap2_onenand_remove(struct platform_device *pdev)
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
onenand_release(&c->mtd);
- regulator_put(c->regulator);
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
omap2_onenand_shutdown(pdev);
- if (c->gpio_irq) {
- free_irq(gpio_to_irq(c->gpio_irq), c);
- gpio_free(c->gpio_irq);
- }
- iounmap(c->onenand.base);
- release_mem_region(c->phys_base, c->mem_size);
- kfree(c);
return 0;
}
+static const struct of_device_id omap2_onenand_id_table[] = {
+ { .compatible = "ti,omap2-onenand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
+
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
.remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = omap2_onenand_id_table,
},
};
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1a6d0e367b89..979f4031f23c 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1383,15 +1383,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (from >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
stats = mtd->ecc_stats;
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
@@ -1448,38 +1439,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
/**
- * onenand_read - [MTD Interface] Read data from flash
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put data
- *
- * Read with ecc
-*/
-static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_4KB_PAGE(this) ?
- onenand_mlc_read_ops_nolock(mtd, from, &ops) :
- onenand_read_ops_nolock(mtd, from, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_read_oob - [MTD Interface] Read main and/or out-of-band
* @param mtd: MTD device structure
* @param from: offset to read from
@@ -2056,15 +2015,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(to >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (to >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to write past end of device\n",
- __func__);
- return -EINVAL;
- }
-
oobbuf = this->oob_buf;
oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
@@ -2129,35 +2079,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
}
/**
- * onenand_write - [MTD Interface] write buffer to FLASH
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
- *
- * Write with ECC
- */
-static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = (u_char *) buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_WRITING);
- ret = onenand_write_ops_nolock(mtd, to, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @param mtd: MTD device structure
* @param to: offset to write
@@ -4038,8 +3959,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->_erase = onenand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = onenand_read;
- mtd->_write = onenand_write;
mtd->_read_oob = onenand_read_oob;
mtd->_write_oob = onenand_write_oob;
mtd->_panic_write = onenand_panic_write;
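Dropping onenand_read()/onenand_write() without replacement only works if the MTD core falls back to ->_read_oob()/->_write_oob() when ->_read()/->_write() are absent, which is the direction this change assumes. The removed wrappers show what that fallback amounts to; a condensed sketch of the data-only path:

/* Sketch: data-only read built on _read_oob, mirroring the removed
 * onenand_read() wrapper (assumes the core performs this fallback).
 */
static int fallback_read(struct mtd_info *mtd, loff_t from, size_t len,
			 size_t *retlen, u_char *buf)
{
	struct mtd_oob_ops ops = {
		.len = len,
		.datbuf = buf,
		/* .ooblen = 0, .oobbuf = NULL: main area only */
	};
	int ret;

	ret = mtd->_read_oob(mtd, from, &ops);
	*retlen = ops.retlen;
	return ret;
}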
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index af0ac1a7bf8f..2e9d076e445a 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/mach/flash.h>
-
#include "samsung.h"
enum soc_type {
@@ -129,16 +127,13 @@ struct s3c_onenand {
struct platform_device *pdev;
enum soc_type type;
void __iomem *base;
- struct resource *base_res;
void __iomem *ahb_addr;
- struct resource *ahb_res;
int bootram_command;
- void __iomem *page_buf;
- void __iomem *oob_buf;
+ void *page_buf;
+ void *oob_buf;
unsigned int (*mem_addr)(int fba, int fpa, int fsa);
unsigned int (*cmd_map)(unsigned int type, unsigned int val);
void __iomem *dma_addr;
- struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
};
@@ -413,8 +408,8 @@ static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
/*
* Emulate Two BufferRAMs and access with 4 bytes pointer
*/
- m = (unsigned int *) onenand->page_buf;
- s = (unsigned int *) onenand->oob_buf;
+ m = onenand->page_buf;
+ s = onenand->oob_buf;
if (index) {
m += (this->writesize >> 2);
@@ -486,11 +481,11 @@ static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
unsigned char *p;
if (area == ONENAND_DATARAM) {
- p = (unsigned char *) onenand->page_buf;
+ p = onenand->page_buf;
if (index == 1)
p += this->writesize;
} else {
- p = (unsigned char *) onenand->oob_buf;
+ p = onenand->oob_buf;
if (index == 1)
p += mtd->oobsize;
}
@@ -851,15 +846,14 @@ static int s3c_onenand_probe(struct platform_device *pdev)
/* No need to check pdata. The platform data is optional. */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
- mtd = kzalloc(size, GFP_KERNEL);
+ mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!mtd)
return -ENOMEM;
- onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
- if (!onenand) {
- err = -ENOMEM;
- goto onenand_fail;
- }
+ onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
+ GFP_KERNEL);
+ if (!onenand)
+ return -ENOMEM;
this = (struct onenand_chip *) &mtd[1];
mtd->priv = this;
@@ -870,26 +864,12 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENOENT;
- goto ahb_resource_failed;
- }
+ onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->base))
+ return PTR_ERR(onenand->base);
- onenand->base_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->base_res) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- err = -EBUSY;
- goto resource_failed;
- }
+ onenand->phys_base = r->start;
- onenand->base = ioremap(r->start, resource_size(r));
- if (!onenand->base) {
- dev_err(&pdev->dev, "failed to map memory resource\n");
- err = -EFAULT;
- goto ioremap_failed;
- }
/* Set onenand_chip also */
this->base = onenand->base;
@@ -898,40 +878,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (onenand->type != TYPE_S5PC110) {
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no buffer memory resource defined\n");
- err = -ENOENT;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->ahb_res) {
- dev_err(&pdev->dev, "failed to request buffer memory resource\n");
- err = -EBUSY;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_addr = ioremap(r->start, resource_size(r));
- if (!onenand->ahb_addr) {
- dev_err(&pdev->dev, "failed to map buffer memory resource\n");
- err = -EINVAL;
- goto ahb_ioremap_failed;
- }
+ onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->ahb_addr))
+ return PTR_ERR(onenand->ahb_addr);
/* Allocate 4KiB BufferRAM */
- onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!onenand->page_buf) {
- err = -ENOMEM;
- goto page_buf_fail;
- }
+ onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
+ GFP_KERNEL);
+ if (!onenand->page_buf)
+ return -ENOMEM;
/* Allocate 128 SpareRAM */
- onenand->oob_buf = kzalloc(128, GFP_KERNEL);
- if (!onenand->oob_buf) {
- err = -ENOMEM;
- goto oob_buf_fail;
- }
+ onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
+ if (!onenand->oob_buf)
+ return -ENOMEM;
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
@@ -939,28 +899,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
} else { /* S5PC110 */
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no dma memory resource defined\n");
- err = -ENOENT;
- goto dma_resource_failed;
- }
-
- onenand->dma_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->dma_res) {
- dev_err(&pdev->dev, "failed to request dma memory resource\n");
- err = -EBUSY;
- goto dma_resource_failed;
- }
-
- onenand->dma_addr = ioremap(r->start, resource_size(r));
- if (!onenand->dma_addr) {
- dev_err(&pdev->dev, "failed to map dma memory resource\n");
- err = -EINVAL;
- goto dma_ioremap_failed;
- }
-
- onenand->phys_base = onenand->base_res->start;
+ onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->dma_addr))
+ return PTR_ERR(onenand->dma_addr);
s5pc110_dma_ops = s5pc110_dma_poll;
/* Interrupt support */
@@ -968,19 +909,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (r) {
init_completion(&onenand->complete);
s5pc110_dma_ops = s5pc110_dma_irq;
- err = request_irq(r->start, s5pc110_onenand_irq,
- IRQF_SHARED, "onenand", &onenand);
+ err = devm_request_irq(&pdev->dev, r->start,
+ s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand",
+ &onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
- goto scan_failed;
+ return err;
}
}
}
- if (onenand_scan(mtd, 1)) {
- err = -EFAULT;
- goto scan_failed;
- }
+ err = onenand_scan(mtd, 1);
+ if (err)
+ return err;
if (onenand->type != TYPE_S5PC110) {
/* S3C doesn't handle subpage write */
@@ -994,40 +936,15 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = mtd_device_parse_register(mtd, NULL, NULL,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
+ onenand_release(mtd);
+ return err;
+ }
platform_set_drvdata(pdev, mtd);
return 0;
-
-scan_failed:
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
-dma_ioremap_failed:
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
- kfree(onenand->oob_buf);
-oob_buf_fail:
- kfree(onenand->page_buf);
-page_buf_fail:
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
-ahb_ioremap_failed:
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
-dma_resource_failed:
-ahb_resource_failed:
- iounmap(onenand->base);
-ioremap_failed:
- if (onenand->base_res)
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-resource_failed:
- kfree(onenand);
-onenand_fail:
- kfree(mtd);
- return err;
}
static int s3c_onenand_remove(struct platform_device *pdev)
@@ -1035,25 +952,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
onenand_release(mtd);
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
-
- iounmap(onenand->base);
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-
- kfree(onenand->oob_buf);
- kfree(onenand->page_buf);
- kfree(onenand);
- kfree(mtd);
+
return 0;
}
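The samsung probe rewrite above is a straight conversion to managed (devm_*) resources: each allocation is bound to the device, so the long goto unwind ladder and most of the remove path disappear. A minimal sketch of the pattern, under the assumption that every resource is device-lifetime:

/* Sketch: managed-resource probe, no unwind ladder needed. Earlier
 * devm allocations are released automatically on any error return.
 */
static int example_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *base;
	void *buf;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, r);	/* request + map */
	if (IS_ERR(base))
		return PTR_ERR(base);

	buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	return 0;
}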
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index 5fe0079ea5ed..8893dc82a5c8 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -192,7 +192,7 @@ static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
/* create physical-logical table */
for (block_num = 0; block_num < phymax; block_num++) {
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
if (mtd_block_isbad(mtd, block_adr))
continue;
@@ -219,7 +219,7 @@ exit:
return ret;
}
-void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
kfree(ftl->log2phy);
}
@@ -244,7 +244,7 @@ static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
return -EINVAL;
block_num = ftl->log2phy[log_num];
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
block_ofs = mtd_mod_by_eb((u32)from, mtd);
err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
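The (loff_t) casts in sharpslpart.c fix a 32-bit overflow: block_num and mtd->erasesize are both 32-bit, so their product wraps before being widened to the 64-bit loff_t. An illustration with a hypothetical 128 KiB erase block:

/* Hypothetical values: with a 128 KiB erase block, any block number
 * >= 32768 overflows a 32-bit product before assignment to loff_t.
 */
u32 block_num = 40000;
u32 erasesize = 128 * 1024;
loff_t wrong = block_num * erasesize;		/* 32-bit multiply wraps */
loff_t right = (loff_t)block_num * erasesize;	/* widened before multiply */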
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 75a2bc447a99..4b8e9183489a 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -58,6 +58,7 @@ struct cqspi_flash_pdata {
u8 data_width;
u8 cs;
bool registered;
+ bool use_direct_mode;
};
struct cqspi_st {
@@ -68,6 +69,7 @@ struct cqspi_st {
void __iomem *iobase;
void __iomem *ahb_base;
+ resource_size_t ahb_size;
struct completion transfer_complete;
struct mutex bus_mutex;
@@ -103,6 +105,7 @@ struct cqspi_st {
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
@@ -450,8 +453,7 @@ static int cqspi_command_write_addr(struct spi_nor *nor,
return cqspi_exec_flash_cmd(cqspi, reg);
}
-static int cqspi_indirect_read_setup(struct spi_nor *nor,
- const unsigned int from_addr)
+static int cqspi_read_setup(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -459,8 +461,6 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
unsigned int dummy_clk = 0;
unsigned int reg;
- writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
-
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
@@ -493,8 +493,8 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_read_execute(struct spi_nor *nor,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ loff_t from_addr, const size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -504,6 +504,7 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor,
unsigned int bytes_to_read = 0;
int ret = 0;
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
/* Clear all interrupts. */
@@ -570,8 +571,7 @@ failrd:
return ret;
}
-static int cqspi_indirect_write_setup(struct spi_nor *nor,
- const unsigned int to_addr)
+static int cqspi_write_setup(struct spi_nor *nor)
{
unsigned int reg;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -584,8 +584,6 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
reg = cqspi_calc_rdreg(nor, nor->program_opcode);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
- writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
-
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (nor->addr_width - 1);
@@ -593,8 +591,8 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_write_execute(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx)
+static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
+ const u8 *txbuf, const size_t n_tx)
{
const unsigned int page_size = nor->page_size;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -604,6 +602,7 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
unsigned int write_bytes;
int ret;
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
/* Clear all interrupts. */
@@ -894,17 +893,22 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
size_t len, const u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 0);
if (ret)
return ret;
- ret = cqspi_indirect_write_setup(nor, to);
+ ret = cqspi_write_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_write_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_toio(cqspi->ahb_base + to, buf, len);
+ else
+ ret = cqspi_indirect_write_execute(nor, to, buf, len);
if (ret)
return ret;
@@ -914,17 +918,22 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 1);
if (ret)
return ret;
- ret = cqspi_indirect_read_setup(nor, from);
+ ret = cqspi_read_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_read_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ else
+ ret = cqspi_indirect_read_execute(nor, buf, from, len);
if (ret)
return ret;
@@ -1059,6 +1068,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
+ u32 reg;
+
cqspi_controller_enable(cqspi, 0);
/* Configure the remap address register, no remap */
@@ -1081,6 +1092,11 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+ /* Enable Direct Access Controller */
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
cqspi_controller_enable(cqspi, 1);
}
@@ -1156,6 +1172,12 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
f_pdata->registered = true;
+
+ if (mtd->size <= cqspi->ahb_size) {
+ f_pdata->use_direct_mode = true;
+ dev_dbg(nor->dev, "using direct mode for %s\n",
+ mtd->name);
+ }
}
return 0;
@@ -1215,6 +1237,7 @@ static int cqspi_probe(struct platform_device *pdev)
dev_err(dev, "Cannot remap AHB address.\n");
return PTR_ERR(cqspi->ahb_base);
}
+ cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
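The cadence-quadspi changes split setup from execution so the same setup path can serve a new direct-access mode: when the whole flash fits inside the memory-mapped AHB window (mtd->size <= ahb_size), reads and writes become plain MMIO copies instead of indirect-FIFO transactions. A condensed sketch of the read-side dispatch as the hunks above wire it up:

/* Sketch: direct vs. indirect read dispatch, mirroring cqspi_read(). */
static ssize_t example_cqspi_read(struct spi_nor *nor, loff_t from,
				  size_t len, u_char *buf)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;

	if (f_pdata->use_direct_mode) {
		/* flash window is fully memory-mapped: plain MMIO copy */
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return len;
	}

	/* otherwise fall back to the indirect-FIFO path */
	return cqspi_indirect_read_execute(nor, buf, from, len) ?: len;
}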
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index f17d22435bfc..2901c7bd9e30 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -801,10 +801,10 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
- { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
- { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
{ /* sentinel */ }
};
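These hunks work because .data in struct of_device_id is declared const void *, so a pointer to const driver data assigns without a cast; the (void *) casts only served to discard const. A self-contained sketch of the pattern (the struct and names are illustrative, not the fsl-quadspi originals):

#include <linux/of_device.h>

struct example_devtype_data {
	unsigned int rxfifo;
	unsigned int txfifo;
};

static const struct example_devtype_data example_data = {
	.rxfifo = 128,
	.txfifo = 512,
};

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-qspi", .data = &example_data, },
	{ /* sentinel */ }
};

At probe time the matched data comes back via of_device_get_match_data(dev), which also returns const void *, so constness is preserved end to end.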
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index ef034d898a23..699951523179 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -138,7 +138,6 @@
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. These are programmed by the BIOS
* before it locks down the controller.
- * @preopcodes: Preopcodes which are supported.
*/
struct intel_spi {
struct device *dev;
@@ -155,7 +154,6 @@ struct intel_spi {
bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
- u8 preopcodes[2];
};
static bool writeable;
@@ -400,10 +398,6 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
}
}
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index abe455ccd68b..5442993b71ff 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -110,7 +110,7 @@
#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
-struct mt8173_nor {
+struct mtk_nor {
struct spi_nor nor;
struct device *dev;
void __iomem *base; /* nor flash base address */
@@ -118,48 +118,48 @@ struct mt8173_nor {
struct clk *nor_clk;
};
-static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
{
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
switch (nor->read_proto) {
case SNOR_PROTO_1_1_1:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
+ writeb(MTK_NOR_FAST_READ, mtk_nor->base +
MTK_NOR_CFG1_REG);
break;
case SNOR_PROTO_1_1_2:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
case SNOR_PROTO_1_1_4:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA4_REG);
- writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
default:
- writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
}
}
-static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
+static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
{
int reg;
u8 val = cmdval & 0x1f;
- writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
- return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
+ writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
+ return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
!(reg & val), 100, 10000);
}
-static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
+ u8 *tx, int txlen, u8 *rx, int rxlen)
{
int len = 1 + txlen + rxlen;
int i, ret, idx;
@@ -167,26 +167,26 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
if (len > MTK_NOR_MAX_SHIFT)
return -EINVAL;
- writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
+ writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
/* start at PRGDATA5, go down to PRGDATA0 */
idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
/* opcode */
- writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
/* program TX data */
for (i = 0; i < txlen; i++, idx--)
- writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
/* clear out rest of TX registers */
while (idx >= 0) {
- writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
}
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
if (ret)
return ret;
@@ -195,20 +195,20 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
/* read out RX data */
for (i = 0; i < rxlen; i++, idx--)
- rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
+ rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
return 0;
}
/* Do a WRSR (Write Status Register) command */
-static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
{
- writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
- writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
+ writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
+ writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
}
-static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
{
u8 reg;
@@ -216,27 +216,27 @@ static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
* 0: pre-fetch buffer used for read
* 1: pre-fetch buffer used for page program
*/
- writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
0x01 == (reg & 0x01), 100, 10000);
}
-static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
{
u8 reg;
- writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
10000);
}
-static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
{
u8 val;
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
- val = readb(mt8173_nor->base + MTK_NOR_DUAL_REG);
+ val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
switch (nor->addr_width) {
case 3:
@@ -246,115 +246,115 @@ static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
val |= MTK_NOR_4B_ADDR_EN;
break;
default:
- dev_warn(mt8173_nor->dev, "Unexpected address width %u.\n",
+ dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
nor->addr_width);
break;
}
- writeb(val, mt8173_nor->base + MTK_NOR_DUAL_REG);
+ writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
}
-static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
+static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
{
int i;
- mt8173_nor_set_addr_width(mt8173_nor);
+ mtk_nor_set_addr_width(mtk_nor);
for (i = 0; i < 3; i++) {
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
addr >>= 8;
}
/* Last register is non-contiguous */
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
}
-static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- u_char *buffer)
+static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
{
int i, ret;
int addr = (int)from;
u8 *buf = (u8 *)buffer;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
/* set mode for fast read, dual read or quad read */
- mt8173_nor_set_read_mode(mt8173_nor);
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_read_mode(mtk_nor);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
if (ret < 0)
return ret;
- buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
+ buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
}
return length;
}
-static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
- int addr, int length, u8 *data)
+static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
+ int addr, int length, u8 *data)
{
int i, ret;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
+ writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
if (ret < 0)
return ret;
}
return 0;
}
-static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
- const u8 *buf)
+static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
+ const u8 *buf)
{
int i, bufidx, data;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
bufidx = 0;
for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
buf[bufidx + 1]<<8 | buf[bufidx];
bufidx += 4;
- writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
+ writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
}
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
}
-static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *buf)
+static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
size_t i;
- ret = mt8173_nor_write_buffer_enable(mt8173_nor);
+ ret = mtk_nor_write_buffer_enable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
return ret;
}
for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
- ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
+ ret = mtk_nor_write_buffer(mtk_nor, to, buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write buffer failed!\n");
+ dev_err(mtk_nor->dev, "write buffer failed!\n");
return ret;
}
to += SFLASH_WRBUF_SIZE;
buf += SFLASH_WRBUF_SIZE;
}
- ret = mt8173_nor_write_buffer_disable(mt8173_nor);
+ ret = mtk_nor_write_buffer_disable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
return ret;
}
if (i < len) {
- ret = mt8173_nor_write_single_byte(mt8173_nor, to,
- (int)(len - i), (u8 *)buf);
+ ret = mtk_nor_write_single_byte(mtk_nor, to,
+ (int)(len - i), (u8 *)buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write single byte failed!\n");
+ dev_err(mtk_nor->dev, "write single byte failed!\n");
return ret;
}
}
@@ -362,72 +362,72 @@ static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_RDSR:
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
if (ret < 0)
return ret;
if (len == 1)
- *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
+ *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
else
- dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
+ dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
break;
}
return ret;
}
-static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_WRSR:
/* We only handle 1 byte */
- ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
+ ret = mtk_nor_wr_sr(mtk_nor, *buf);
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
if (ret)
- dev_warn(mt8173_nor->dev, "write reg failure!\n");
+ dev_warn(mtk_nor->dev, "write reg failure!\n");
break;
}
return ret;
}
-static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
{
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->nor_clk);
}
-static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
{
int ret;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ ret = clk_prepare_enable(mtk_nor->spi_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ ret = clk_prepare_enable(mtk_nor->nor_clk);
if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
return ret;
}
return 0;
}
-static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
const struct spi_nor_hwcaps hwcaps = {
@@ -439,18 +439,18 @@ static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct spi_nor *nor;
/* initialize controller to accept commands */
- writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
+ writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
- nor = &mt8173_nor->nor;
- nor->dev = mt8173_nor->dev;
- nor->priv = mt8173_nor;
+ nor = &mtk_nor->nor;
+ nor->dev = mtk_nor->dev;
+ nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
/* fill the hooks to spi nor */
- nor->read = mt8173_nor_read;
- nor->read_reg = mt8173_nor_read_reg;
- nor->write = mt8173_nor_write;
- nor->write_reg = mt8173_nor_write_reg;
+ nor->read = mtk_nor_read;
+ nor->read_reg = mtk_nor_read_reg;
+ nor->write = mtk_nor_write;
+ nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* name is NULL: let spi_nor_scan() detect the flash from its JEDEC ID */
ret = spi_nor_scan(nor, NULL, &hwcaps);
@@ -465,34 +465,34 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
struct device_node *flash_np;
struct resource *res;
int ret;
- struct mt8173_nor *mt8173_nor;
+ struct mtk_nor *mtk_nor;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
- mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
- if (!mt8173_nor)
+ mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
+ if (!mtk_nor)
return -ENOMEM;
- platform_set_drvdata(pdev, mt8173_nor);
+ platform_set_drvdata(pdev, mtk_nor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mt8173_nor->base))
- return PTR_ERR(mt8173_nor->base);
+ mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mtk_nor->base))
+ return PTR_ERR(mtk_nor->base);
- mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(mt8173_nor->spi_clk))
- return PTR_ERR(mt8173_nor->spi_clk);
+ mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mtk_nor->spi_clk))
+ return PTR_ERR(mtk_nor->spi_clk);
- mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
- if (IS_ERR(mt8173_nor->nor_clk))
- return PTR_ERR(mt8173_nor->nor_clk);
+ mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(mtk_nor->nor_clk))
+ return PTR_ERR(mtk_nor->nor_clk);
- mt8173_nor->dev = &pdev->dev;
+ mtk_nor->dev = &pdev->dev;
- ret = mt8173_nor_enable_clk(mt8173_nor);
+ ret = mtk_nor_enable_clk(mtk_nor);
if (ret)
return ret;
@@ -503,20 +503,20 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = -ENODEV;
goto nor_free;
}
- ret = mtk_nor_init(mt8173_nor, flash_np);
+ ret = mtk_nor_init(mtk_nor, flash_np);
nor_free:
if (ret)
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return ret;
}
static int mtk_nor_drv_remove(struct platform_device *pdev)
{
- struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
+ struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
@@ -524,18 +524,18 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mtk_nor_suspend(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
static int mtk_nor_resume(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- return mt8173_nor_enable_clk(mt8173_nor);
+ return mtk_nor_enable_clk(mtk_nor);
}
static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
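The suspend/resume pair above simply gates the controller clocks; the initializer that follows (truncated here) wires them in through the standard system-sleep helper. A sketch of that wiring, assuming the driver uses the usual macro (the _sketch name is illustrative):

static const struct dev_pm_ops mtk_nor_dev_pm_ops_sketch = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};

SET_SYSTEM_SLEEP_PM_OPS only expands to the sleep callbacks when CONFIG_PM_SLEEP is set, which matches the #ifdef guard around the two functions above.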
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index bc266f70a15b..d445a4d3b770 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -330,8 +330,22 @@ static inline int spi_nor_fsr_ready(struct spi_nor *nor)
int fsr = read_fsr(nor);
if (fsr < 0)
return fsr;
- else
- return fsr & FSR_READY;
+
+ if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+ if (fsr & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (fsr & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ return -EIO;
+ }
+
+ return fsr & FSR_READY;
}
static int spi_nor_ready(struct spi_nor *nor)
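The reworked spi_nor_fsr_ready() now separates "still busy" from "operation failed": the error bits latch in the flag status register and must be cleared explicitly, which is why the failure path issues SPINOR_OP_CLFSR before returning -EIO. A compact sketch of the decode, restating the commonly used FSR bit positions as assumptions:

/* Assumed FSR layout (matches the usual spi-nor.h definitions):
 *   bit 7  FSR_READY   1 = idle, 0 = busy
 *   bit 5  FSR_E_ERR   erase failed (latched)
 *   bit 4  FSR_P_ERR   program failed (latched)
 *   bit 1  FSR_PT_ERR  a protected sector was targeted
 */
static int fsr_decode_sketch(struct spi_nor *nor, int fsr)
{
	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
		nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0); /* unlatch */
		return -EIO;
	}
	return fsr & FSR_READY;	/* nonzero = ready, 0 = keep polling */
}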
@@ -552,6 +566,27 @@ erase_err:
return ret;
}
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+ int ret;
+
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (ret < 0)
+ return ret;
+
+ return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
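write_sr_and_check() exists because a status-register write can be silently ignored, for example when the WP# pin asserts hardware protection; without the read-back, stm_lock()/stm_unlock() would report success while the block-protection bits stayed unchanged. Only the bits in @mask are compared, since other SR bits may legitimately differ. A usage sketch mirroring the lock paths below:

	/* status_new/mask come from stm_lock()'s BP-bit computation */
	ret = write_sr_and_check(nor, status_new, mask);
	if (ret == -EIO)
		dev_err(nor->dev, "SR write did not stick; flash may be HW-protected\n");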
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -650,7 +685,6 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -714,11 +748,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -735,7 +765,6 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -802,11 +831,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -1020,7 +1045,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -1065,7 +1096,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
- /* Spansion -- single (large) sector size only, at least
+ /* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -1094,6 +1125,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2713,6 +2746,16 @@ static void spi_nor_resume(struct mtd_info *mtd)
dev_err(dev, "resume() failed\n");
}
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
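spi_nor_restore() is meant to be called from a driver's shutdown or remove path: a flash left in 4-byte addressing can break a warm reboot when the boot ROM or bootloader issues plain 3-byte reads. The condition mirrors set_4byte(): only parts that were put into a stateful 4-byte mode need to be switched back. A hedged sketch of a caller (the hook name and drvdata layout are illustrative):

static void example_flash_shutdown(struct spi_device *spi)
{
	struct spi_nor *nor = spi_get_drvdata(spi);

	/* Put the flash back into 3-byte mode for the next boot stage. */
	spi_nor_restore(nor);
}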
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 5f03b8c885a9..cde19c99e77b 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -151,7 +151,7 @@ static int read_page(int log)
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
- if (err == -EUCLEAN)
+ if (!err || err == -EUCLEAN)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
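The widened test matters because mtd_read() has a three-way return convention: 0 for a clean read, -EUCLEAN when ECC corrected bitflips (the data is still valid), and another negative errno for a hard failure. The test wants the corrected-bitflip delta in the first two cases and a real error only in the third; mtd_is_bitflip() from mtd.h encodes the same check:

	err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
	if (!err || mtd_is_bitflip(err))
		err = mtd->ecc_stats.corrected - oldstats.corrected;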
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f7758fb6..766b2c385682 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
/* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
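The same three-line bitflip filter now appears at every mtd_read_oob() call site in this test. A small wrapper would keep the sites from drifting; a sketch (the helper name is hypothetical, not an existing MTD API):

static int oobtest_read_oob(struct mtd_info *mtd, loff_t addr,
			    struct mtd_oob_ops *ops)
{
	int err = mtd_read_oob(mtd, addr, ops);

	/* Corrected bitflips are not failures for this test. */
	return mtd_is_bitflip(err) ? 0 : err;
}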
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index b210fdb31c98..b1fc28f63882 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
+static DEFINE_IDR(ubiblock_minor_idr);
+/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
@@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
.init_request = ubiblock_init_request,
};
-static DEFINE_IDR(ubiblock_minor_idr);
-
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
- mutex_unlock(&devices_mutex);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
- mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
mutex_init(&dev->dev_mutex);
@@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
goto out_free_queue;
}
- mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
- mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
return 0;
out_free_queue:
@@ -457,6 +457,8 @@ out_put_disk:
put_disk(dev->gd);
out_free_dev:
kfree(dev);
+out_unlock:
+ mutex_unlock(&devices_mutex);
return ret;
}
@@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
+ int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
- mutex_unlock(&devices_mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
- mutex_unlock(&dev->dev_mutex);
- mutex_unlock(&devices_mutex);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
- mutex_unlock(&devices_mutex);
-
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+
kfree(dev);
return 0;
+
+out_unlock_dev:
+ mutex_unlock(&dev->dev_mutex);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+ return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
struct ubiblock *next;
struct ubiblock *dev;
+ mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
@@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
ubiblock_cleanup(dev);
kfree(dev);
}
+ mutex_unlock(&devices_mutex);
}
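Two invariants drive this rework: devices_mutex now covers ubiblock_minor_idr as well as the device list (per the new comment), and each function holds it across the whole lookup-modify sequence instead of dropping it between the check and the update, which closed create/create and create/remove races. The error paths funnel through ordered unlock labels so no return can leave a mutex held. The shape, reduced to a sketch (the predicate and body are illustrative):

int example_create(void)
{
	int ret = 0;

	mutex_lock(&devices_mutex);
	if (example_already_present()) {	/* check ... */
		ret = -EEXIST;
		goto out_unlock;
	}
	/* ... and act, still under the same lock */
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}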
int __init ubiblock_init(void)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 136ce05d2328..e941395de3ae 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -535,8 +535,17 @@ static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
int limit, device_pebs;
uint64_t device_size;
- if (!max_beb_per1024)
- return 0;
+ if (!max_beb_per1024) {
+ /*
+ * Since max_beb_per1024 has not been set by the user in either
+ * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
+ * limit if it is supported by the device.
+ */
+ limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
+ if (limit < 0)
+ return 0;
+ return limit;
+ }
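mtd_max_bad_blocks() asks the underlying MTD driver for the worst-case bad-block count over a byte range and returns a negative errno (typically -ENOTSUPP) when no estimate is available, in which case the old behaviour, no limit, is kept. Because it takes an offset and length, a UBI device attached to a partition gets a limit proportional to that partition rather than to the whole chip:

	/* Query only the region this UBI device actually occupies. */
	limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
	if (limit < 0)
		limit = 0;	/* no estimate available: keep old default */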
/*
* Here we are using size of the entire flash chip and
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 388e46be6ad9..250e30fac61b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -384,7 +384,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
- * leb_write_lock - lock logical eraseblock for writing.
+ * leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 4f0bd6b4422a..590d967011bb 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -66,7 +66,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_avalible(struct rb_root *root)
+static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 5a832bc79b1b..91705962ba73 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -214,9 +214,8 @@ static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
- p = &av->root.rb_node;
while (*p) {
parent = *p;
@@ -1063,7 +1062,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
- kfree(fm->e[i]);
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
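The one-liner fixes an allocator mismatch: fm->e[] entries come from ubi_wl_entry_slab via kmem_cache_alloc(), and objects from a dedicated cache must be returned with kmem_cache_free() to that same cache; handing them to kfree() is not guaranteed to work and trips slab debugging. The pairing rule as a sketch (helper names are hypothetical):

static struct ubi_wl_entry *wl_entry_get_sketch(void)
{
	return kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
}

static void wl_entry_put_sketch(struct ubi_wl_entry *e)
{
	kmem_cache_free(ubi_wl_entry_slab, e);	/* never kfree(e) */
}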
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 85237cf661f9..3fd8d7ff7a02 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -315,6 +316,10 @@ out_sysfs:
*/
cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b5b8cd6f481c..2052a647220e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -692,7 +692,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
- anchor = !anchor_pebs_avalible(&ubi->free);
+ anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
@@ -1529,6 +1529,46 @@ static void shutdown_work(struct ubi_device *ubi)
}
/**
* erase_aeb - erase the PEB described by a UBI attach info entry
+ * @ubi: UBI device description object
+ * @aeb: UBI attach info PEB
+ * @sync: If true, erase synchronously. Otherwise schedule for erasure
+ */
+static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
+{
+ struct ubi_wl_entry *e;
+ int err;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (sync) {
+ err = sync_erase(ubi, e, false);
+ if (err)
+ goto out_free;
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ } else {
+ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
+ if (err)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ wl_entry_destroy(ubi, e);
+
+ return err;
+}
+
+/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
@@ -1566,17 +1606,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
-
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, false);
+ if (err)
goto out_free;
- }
found_pebs++;
}
@@ -1585,8 +1617,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1605,8 +1639,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1635,6 +1671,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
} else {
+ bool sync = false;
+
/*
* Usually old Fastmap PEBs are scheduled for erasure
* and we don't have to care about them but if we face
@@ -1644,18 +1682,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->lookuptbl[aeb->pnum])
continue;
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
+ /*
+ * The fastmap update code might not find a free PEB for
+ * writing the fastmap anchor to and then reuses the
+ * current fastmap anchor PEB. When this PEB gets erased
+ * and a power cut happens before it is written again we
+ * must make sure that the fastmap attach code doesn't
+ * find any outdated fastmap anchors, hence we erase the
+ * outdated fastmap anchor PEBs synchronously here.
+ */
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
+ sync = true;
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi_assert(!ubi->lookuptbl[e->pnum]);
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, sync);
+ if (err)
goto out_free;
- }
}
found_pebs++;
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index 2aaa3f7f2ba9..a9e2d669acd8 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,7 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_avalible(struct rb_root *root);
+static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 9497f18eaba0..2f91ce8dc614 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -123,7 +123,8 @@ enum mac8390_access {
};
extern int mac8390_memtest(struct net_device *dev);
-static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
+static int mac8390_initdev(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type);
static int mac8390_open(struct net_device *dev);
@@ -169,11 +170,11 @@ static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
static u32 mac8390_msg_enable;
-static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
+static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres)
{
- switch (dev->dr_sw) {
+ switch (fres->dr_sw) {
case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_APPLE_SONIC_NB:
case NUBUS_DRHW_APPLE_SONIC_LC:
case NUBUS_DRHW_SONNET:
@@ -184,7 +185,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_ASANTE_LC:
return MAC8390_NONE;
case NUBUS_DRHW_CABLETRON:
@@ -201,7 +202,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
case NUBUS_DRSW_TECHWORKS:
case NUBUS_DRSW_DAYNA2:
case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ if (fres->dr_hw == NUBUS_DRHW_CABLETRON)
return MAC8390_CABLETRON;
else
return MAC8390_APPLE;
@@ -212,7 +213,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_INTERLAN:
return MAC8390_INTERLAN;
default:
@@ -225,8 +226,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
* These correspond to Dayna Sonic cards
* which use the macsonic driver
*/
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 ||
+ fres->dr_hw == NUBUS_DRHW_INTERLAN)
return MAC8390_NONE;
else
return MAC8390_DAYNA;
@@ -289,7 +290,8 @@ static int __init mac8390_memsize(unsigned long membase)
return i * 0x1000;
}
-static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+static bool __init mac8390_init(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type cardtype)
{
struct nubus_dir dir;
@@ -394,7 +396,7 @@ static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- struct nubus_dev *ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
int err = -ENODEV;
struct ei_device *ei_local;
@@ -414,8 +416,11 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
- ndev))) {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1 << ndev->board->slot))
continue;
@@ -489,7 +494,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
};
static int __init mac8390_initdev(struct net_device *dev,
- struct nubus_dev *ndev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type)
{
static u32 fwrd4_offsets[16] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 429467364219..9040e13ce4b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -58,6 +58,13 @@
extern struct list_head adapter_list;
extern struct mutex uld_mutex;
+/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
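The relocated macro is easy to check by hand. Assuming MAX_SKB_FRAGS is 17 (the common configuration) and the SGE's 8-byte flits with 8 flits per 64-byte descriptor:

/* Worked example, assuming MAX_SKB_FRAGS == 17:
 *   SGL flits   = (3 * 17) / 2 + (17 & 1) = 25 + 1 = 26
 *   descriptors = DIV_ROUND_UP(26, 8)     = 4
 *   threshold   = 1 + 4                   = 5
 * The leading 1 covers the descriptor holding the WR/CPL (and LSO)
 * headers, so the queue is stopped once fewer than 5 descriptors remain.
 */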
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -565,6 +572,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
};
struct rx_sw_desc;
@@ -981,6 +989,11 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
@@ -1739,6 +1752,16 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
void free_txq(struct adapter *adap, struct sge_txq *q);
+void cxgb4_reclaim_completed_tx(struct adapter *adap,
+ struct sge_txq *q, bool unmap);
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr);
+void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos);
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr);
+void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 4ea76c1411dc..2822bbff73e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2906,6 +2906,8 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1e3cd8abc56d..1ca2a39ed0f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4294,7 +4294,7 @@ static int adap_init0(struct adapter *adap)
} else {
adap->vres.ncrypto_fc = val[0];
}
- adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
+ adap->params.crypto = ntohs(caps_cmd.cryptocaps);
adap->num_uld += 1;
}
#undef FW_PARAM_PFVF
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 71a315bc1409..6b5fea4532f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -637,6 +637,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->crypto = adap->params.crypto;
lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 08e709ab6dd4..1d37672902da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -297,6 +297,7 @@ struct chcr_stats_debug {
atomic_t complete;
atomic_t error;
atomic_t fallback;
+ atomic_t ipsec_cnt;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
@@ -322,6 +323,7 @@ struct cxgb4_lld_info {
unsigned char wr_cred; /* WR 16-byte credits */
unsigned char adapter_type; /* type of adapter */
unsigned char fw_api_ver; /* FW API version */
+ unsigned char crypto; /* crypto support */
unsigned int fw_vers; /* FW version */
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned int cclk_ps; /* Core clock period in psec */
@@ -370,6 +372,7 @@ struct cxgb4_uld_info {
struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
+ int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
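The new tx_handler hook lets an upper-layer driver take over transmission of selected skbs; as the sge.c hunk further down shows, t4_eth_xmit() diverts non-GSO packets carrying an xfrm offload state to the crypto ULD. A sketch of the registration side with illustrative names (the real consumer is the Chelsio inline-IPsec code):

static int example_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	/* Must consume the skb and return a NETDEV_TX_* code. */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct cxgb4_uld_info example_uld_info = {
	.name       = "example",
	.tx_handler = example_tx_handler,
};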
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a7af71bf14fb..6e310a0da7c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -41,6 +41,7 @@
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
+#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
@@ -53,6 +54,7 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
+#include "cxgb4_uld.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -110,14 +112,6 @@
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
- * Suspend an Ethernet Tx queue with fewer available descriptors than this.
- * This is the same as calc_tx_descs() for a TSO packet with
- * nr_frags == MAX_SKB_FRAGS.
- */
-#define ETHTXQ_STOP_THRES \
- (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
-
-/*
* Suspension threshold for non-Ethernet Tx queues. We require enough room
* for a full sized WR.
*/
@@ -134,11 +128,6 @@
*/
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
struct rx_sw_desc { /* SW state per Rx descriptor */
struct page *page;
dma_addr_t dma_addr;
@@ -248,8 +237,8 @@ static inline bool fl_starving(const struct adapter *adapter,
return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
-static int map_skb(struct device *dev, const struct sk_buff *skb,
- dma_addr_t *addr)
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
{
const skb_frag_t *fp, *end;
const struct skb_shared_info *si;
@@ -277,6 +266,7 @@ unwind:
out_err:
return -ENOMEM;
}
+EXPORT_SYMBOL(cxgb4_map_skb);
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
@@ -411,7 +401,7 @@ static inline int reclaimable(const struct sge_txq *q)
}
/**
- * reclaim_completed_tx - reclaims completed Tx descriptors
+ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
* @unmap: whether the buffers should be unmapped for DMA
@@ -420,7 +410,7 @@ static inline int reclaimable(const struct sge_txq *q)
* and frees the associated buffers if possible. Called with the Tx
* queue locked.
*/
-static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
bool unmap)
{
int avail = reclaimable(q);
@@ -437,6 +427,7 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
q->in_use -= avail;
}
}
+EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
const struct rx_sw_desc *d)
@@ -849,7 +840,7 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
}
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
* @sgl: starting location for writing the SGL
@@ -865,9 +856,9 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
* right after the end of the SGL but does not account for any potential
* wrap around, i.e., @end > @sgl.
*/
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
- struct ulptx_sgl *sgl, u64 *end, unsigned int start,
- const dma_addr_t *addr)
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
{
unsigned int i, len;
struct ulptx_sge_pair *to;
@@ -919,6 +910,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
+EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
@@ -937,14 +929,14 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
}
/**
- * ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
* Ring the doorbell for a Tx queue.
*/
-static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
/* Make sure that all writes to the TX Descriptors are committed
* before we tell the hardware about them.
@@ -1011,9 +1003,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb();
}
}
+EXPORT_SYMBOL(cxgb4_ring_tx_db);
/**
- * inline_tx_skb - inline a packet's data into Tx descriptors
+ * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
* @skb: the packet
* @q: the Tx queue where the packet will be inlined
* @pos: starting position in the Tx queue where to inline the packet
@@ -1023,8 +1016,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* Most of the complexity of this operation is dealing with wrap arounds
* in the middle of the packet we want to inline.
*/
-static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
- void *pos)
+void cxgb4_inline_tx_skb(const struct sk_buff *skb,
+ const struct sge_txq *q, void *pos)
{
u64 *p;
int left = (void *)q->stat - pos;
@@ -1046,6 +1039,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
if ((uintptr_t)p & 8)
*p = 0;
}
+EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
const struct sge_txq *q, void *pos,
@@ -1317,6 +1311,12 @@ out_free: dev_kfree_skb_any(skb);
pi = netdev_priv(dev);
adap = pi->adapter;
+ ssi = skb_shinfo(skb);
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (xfrm_offload(skb) && !ssi->gso_size)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
@@ -1333,7 +1333,7 @@ out_free: dev_kfree_skb_any(skb);
}
skb_tx_timestamp(skb);
- reclaim_completed_tx(adap, &q->q, true);
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
@@ -1367,7 +1367,7 @@ out_free: dev_kfree_skb_any(skb);
tnl_type = cxgb_encap_offload_supported(skb);
if (!immediate &&
- unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1386,7 +1386,6 @@ out_free: dev_kfree_skb_any(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
- ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
@@ -1488,13 +1487,13 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (immediate) {
- inline_tx_skb(skb, &q->q, cpl + 1);
+ cxgb4_inline_tx_skb(skb, &q->q, cpl + 1);
dev_consume_skb_any(skb);
} else {
int last_desc;
- write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
- addr);
+ cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1),
+ end, 0, addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
@@ -1506,7 +1505,7 @@ out_free: dev_kfree_skb_any(skb);
txq_advance(&q->q, ndesc);
- ring_tx_db(adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
@@ -1516,9 +1515,9 @@ out_free: dev_kfree_skb_any(skb);
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
*
- * This is a variant of reclaim_completed_tx() that is used for Tx queues
- * that send only immediate data (presently just the control queues) and
- * thus do not have any sk_buffs to release.
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
@@ -1593,13 +1592,13 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
}
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
ctrlq_check_stop(q, wr);
- ring_tx_db(q->adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
spin_unlock(&q->sendq.lock);
kfree_skb(skb);
@@ -1634,7 +1633,7 @@ static void restart_ctrlq(unsigned long data)
txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
@@ -1647,14 +1646,15 @@ static void restart_ctrlq(unsigned long data)
}
}
if (written > 16) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
spin_lock(&q->sendq.lock);
}
q->full = 0;
-ringdb: if (written)
- ring_tx_db(q->adap, &q->q, written);
+ringdb:
+ if (written)
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
spin_unlock(&q->sendq.lock);
}
@@ -1797,7 +1797,7 @@ static void service_ofldq(struct sge_uld_txq *q)
*/
spin_unlock(&q->sendq.lock);
- reclaim_completed_tx(q->adap, &q->q, false);
+ cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
flits = skb->priority; /* previously saved */
ndesc = flits_to_desc(flits);
@@ -1808,9 +1808,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (u64 *)&q->q.desc[q->q.pidx];
if (is_ofld_imm(skb))
- inline_tx_skb(skb, &q->q, pos);
- else if (map_skb(q->adap->pdev_dev, skb,
- (dma_addr_t *)skb->head)) {
+ cxgb4_inline_tx_skb(skb, &q->q, pos);
+ else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
txq_stop_maperr(q);
spin_lock(&q->sendq.lock);
break;
@@ -1841,9 +1841,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (void *)txq->desc;
}
- write_sgl(skb, &q->q, (void *)pos,
- end, hdr_len,
- (dma_addr_t *)skb->head);
+ cxgb4_write_sgl(skb, &q->q, (void *)pos,
+ end, hdr_len,
+ (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
skb->dev = q->adap->port[0];
skb->destructor = deferred_unmap_destructor;
@@ -1857,7 +1857,7 @@ static void service_ofldq(struct sge_uld_txq *q)
txq_advance(&q->q, ndesc);
written += ndesc;
if (unlikely(written > 32)) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
@@ -1872,7 +1872,7 @@ static void service_ofldq(struct sge_uld_txq *q)
kfree_skb(skb);
}
if (likely(written))
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
/* Indicate that no thread is processing the Pending Send Queue
* currently.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index f88766d2401d..0d83b4064a78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -513,6 +513,13 @@ struct fw_ulptx_wr {
u64 cookie;
};
+#define FW_ULPTX_WR_DATA_S 28
+#define FW_ULPTX_WR_DATA_M 0x1
+#define FW_ULPTX_WR_DATA_V(x) ((x) << FW_ULPTX_WR_DATA_S)
+#define FW_ULPTX_WR_DATA_G(x) \
+ (((x) >> FW_ULPTX_WR_DATA_S) & FW_ULPTX_WR_DATA_M)
+#define FW_ULPTX_WR_DATA_F FW_ULPTX_WR_DATA_V(1U)
+
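The S/M/V/G/F quintet is the firmware API's stock accessor pattern: shift, mask (sized to the field width), value-placer, getter, and the single-bit flag V(1U). For the 1-bit DATA field at bit 28 defined above, usage looks like this (the wrapper name is illustrative):

static inline bool ulptx_wr_has_data_sketch(u32 op_to_compl)
{
	/* G() shifts right by _S and masks with _M to extract the field. */
	return FW_ULPTX_WR_DATA_G(op_to_compl);
}

Setting the bit is the mirror image: OR FW_ULPTX_WR_DATA_F into the word.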
struct fw_tp_wr {
__be32 op_to_immdlen;
__be32 flowid_len16;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index f910f0f386d6..977d4c2c759d 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -187,6 +187,7 @@ struct net_device * __init mac89x0_probe(int unit)
unsigned long ioaddr;
unsigned short sig;
int err = -ENODEV;
+ struct nubus_rsrc *fres;
if (!MACH_IS_MAC)
return ERR_PTR(-ENODEV);
@@ -207,8 +208,9 @@ struct net_device * __init mac89x0_probe(int unit)
/* We might have to parameterize this later */
slot = 0xE;
/* Get out now if there's a real NuBus card in slot E */
- if (nubus_find_slot(slot, NULL) != NULL)
- goto out;
+ for_each_func_rsrc(fres)
+ if (fres->board->slot == slot)
+ goto out;
/* The pseudo-ISA bits always live at offset 0x300 (gee,
wonder why...) */
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 769598f7b6c8..3aaf4bad6c5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
u64 in_param = 0;
int err;
+ if (!cnt)
+ return;
+
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, base_qpn);
set_param_h(&in_param, cnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4d98ce0901af..05c1157bc9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -417,7 +417,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
mlx5_cq_completion(dev, cqn);
break;
-
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
+ mlx5_rsc_event(dev, rsn, eqe->type);
+ break;
case MLX5_EVENT_TYPE_PATH_MIG:
case MLX5_EVENT_TYPE_COMM_EST:
case MLX5_EVENT_TYPE_SQ_DRAINED:
@@ -733,6 +737,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, fpga))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
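
The DCT_DRAINED branch above packs the resource type into the bits above the 24-bit object number before handing the token to mlx5_rsc_event(). A user-space sketch of that encoding (the MLX5_RES_DCT value is assumed for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define MLX5_USER_INDEX_LEN	24
    #define MLX5_RES_DCT		6	/* assumed enum value, sketch only */

    static uint32_t pack_rsn(uint32_t res_type, uint32_t dctn)
    {
    	/* low 24 bits: object number; upper bits: resource type */
    	return (res_type << MLX5_USER_INDEX_LEN) | (dctn & 0xffffff);
    }

    int main(void)
    {
    	uint32_t rsn = pack_rsn(MLX5_RES_DCT, 0x1234);

    	assert(rsn >> MLX5_USER_INDEX_LEN == MLX5_RES_DCT);
    	assert((rsn & 0xffffff) == 0x1234);
    	return 0;
    }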
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4392f741c5f..e6175f8ac0e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -688,7 +688,7 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
@@ -727,7 +727,7 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, next_rcv_psn,
MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
@@ -888,7 +888,8 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
MLX5_ROCE_VERSION_2,
MLX5_ROCE_L3_TYPE_IPV6,
- remote_ip, remote_mac, true, 0);
+ remote_ip, remote_mac, true, 0,
+ MLX5_FPGA_PORT_NUM);
if (err) {
mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
ret = ERR_PTR(err);
@@ -954,7 +955,7 @@ err_cq:
mlx5_fpga_conn_destroy_cq(conn);
err_gid:
mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
- NULL, false, 0);
+ NULL, false, 0, MLX5_FPGA_PORT_NUM);
err_rsvd_gid:
mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
@@ -982,7 +983,7 @@ void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
mlx5_fpga_conn_destroy_cq(conn);
mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
- NULL, NULL, false, 0);
+ NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
kfree(conn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 5ef1b56b6a96..9d11e92fb541 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -195,12 +195,20 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return 0;
}
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
+ int i;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+
+ if (MLX5_CAP_GEN(dev, sw_owner_id)) {
+ for (i = 0; i < 4; i++)
+ MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
+ sw_owner_id[i]);
+ }
+
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1f50b77a081d..f953378bd13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -188,7 +188,7 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp
MLX5_QP_ENHANCED_ULP_STATELESS_MODE);
addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
- MLX5_SET(ads, addr_path, port, 1);
+ MLX5_SET(ads, addr_path, vhca_port_num, 1);
MLX5_SET(ads, addr_path, grh, 1);
ret = mlx5_core_create_qp(mdev, qp, in, inlen);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 5701f125e99c..e159243e0fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -31,6 +31,8 @@
*/
#include <linux/clocksource.h>
+#include <linux/highmem.h>
+#include <rdma/mlx5-abi.h>
#include "en.h"
enum {
@@ -71,6 +73,28 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
return mlx5_read_internal_timer(mdev) & cc->mask;
}
+static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 sign;
+
+ if (!clock_info)
+ return;
+
+ sign = smp_load_acquire(&clock_info->sign);
+ smp_store_mb(clock_info->sign,
+ sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
+
+ clock_info->cycles = clock->tc.cycle_last;
+ clock_info->mult = clock->cycles.mult;
+ clock_info->nsec = clock->tc.nsec;
+ clock_info->frac = clock->tc.frac;
+
+ smp_store_release(&clock_info->sign,
+ sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
+}
+
static void mlx5_pps_out(struct work_struct *work)
{
struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
@@ -109,6 +133,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_lock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
@@ -123,6 +148,7 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
write_lock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -152,6 +178,7 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
write_lock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -179,6 +206,7 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -474,6 +502,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
+ clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
@@ -486,6 +515,25 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
do_div(ns, NSEC_PER_SEC / 2 / HZ);
clock->overflow_period = ns;
+ mdev->clock_info_page = alloc_page(GFP_KERNEL);
+ if (mdev->clock_info_page) {
+ mdev->clock_info = kmap(mdev->clock_info_page);
+ if (!mdev->clock_info) {
+ __free_page(mdev->clock_info_page);
+ mlx5_core_warn(mdev, "failed to map clock page\n");
+ } else {
+ mdev->clock_info->sign = 0;
+ mdev->clock_info->nsec = clock->tc.nsec;
+ mdev->clock_info->cycles = clock->tc.cycle_last;
+ mdev->clock_info->mask = clock->cycles.mask;
+ mdev->clock_info->mult = clock->nominal_c_mult;
+ mdev->clock_info->shift = clock->cycles.shift;
+ mdev->clock_info->frac = clock->tc.frac;
+ mdev->clock_info->overflow_period =
+ clock->overflow_period;
+ }
+ }
+
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
if (clock->overflow_period)
@@ -525,5 +573,12 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
cancel_work_sync(&clock->pps_info.out_work);
cancel_delayed_work_sync(&clock->overflow_work);
+
+ if (mdev->clock_info) {
+ kunmap(mdev->clock_info_page);
+ __free_page(mdev->clock_info_page);
+ mdev->clock_info = NULL;
+ }
+
kfree(clock->ptp_info.pin_config);
}
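
mlx5_update_clock_info_page() implements the writer side of a sequence lock: the sign word is made odd (the KERNEL_UPDATING bit) before the fields change and is bumped past it with a release store afterwards, so a user-space reader of the mapped page can detect torn reads. A sketch of the matching reader loop, with an illustrative structure layout and flag value:

    #include <stdint.h>

    #define CLOCK_INFO_KERNEL_UPDATING	1U	/* assumed flag value */

    struct clock_info {			/* illustrative layout */
    	uint32_t sign;
    	uint32_t mult;
    	uint64_t cycles;
    	uint64_t nsec;
    	uint64_t frac;
    };

    static void read_clock_info(volatile struct clock_info *ci,
    			    struct clock_info *snap)
    {
    	uint32_t start;

    	do {
    		/* spin while an update is in flight (sign is odd) */
    		while ((start = __atomic_load_n(&ci->sign,
    						__ATOMIC_ACQUIRE)) &
    		       CLOCK_INFO_KERNEL_UPDATING)
    			;
    		snap->cycles = ci->cycles;
    		snap->mult = ci->mult;
    		snap->nsec = ci->nsec;
    		snap->frac = ci->frac;
    		/* retry if the writer raced with the snapshot */
    	} while (__atomic_load_n(&ci->sign, __ATOMIC_ACQUIRE) != start);
    }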
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 573f59f46d41..7722a3f9bb68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -121,7 +121,7 @@ EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id)
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
@@ -148,6 +148,9 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
memcpy(addr_l3_addr, gid, gidsz);
}
+ if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
+ MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);
+
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0f88fd30a09a..2ef641c91c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -75,6 +75,8 @@ static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static u32 sw_owner_id[4];
+
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
@@ -551,6 +553,15 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
cache_line_128byte,
cache_line_size() == 128 ? 1 : 0);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
+
+ if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
+ MLX5_SET(cmd_hca_cap,
+ set_hca_cap,
+ num_vhca_ports,
+ MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
+
err = set_caps(dev, set_ctx, set_sz,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
@@ -1107,7 +1118,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
- err = mlx5_cmd_init_hca(dev);
+ err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
dev_err(&pdev->dev, "init hca failed\n");
goto err_pagealloc_stop;
@@ -1643,6 +1654,8 @@ static int __init init(void)
{
int err;
+ get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
+
mlx5_core_verify_params();
mlx5_register_debugfs();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b5a46c128b28..394552f36fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -86,7 +86,7 @@ enum {
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 889130edb715..02d6c5b5d502 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -98,6 +98,11 @@ static u64 sq_allowed_event_types(void)
return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}
+static u64 dct_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
+}
+
static bool is_event_type_allowed(int rsc_type, int event_type)
{
switch (rsc_type) {
@@ -107,6 +112,8 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
return BIT(event_type) & rq_allowed_event_types();
case MLX5_EVENT_QUEUE_TYPE_SQ:
return BIT(event_type) & sq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_DCT:
+ return BIT(event_type) & dct_allowed_event_types();
default:
WARN(1, "Event arrived for unknown resource type");
return false;
@@ -116,6 +123,7 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+ struct mlx5_core_dct *dct;
struct mlx5_core_qp *qp;
if (!common)
@@ -134,7 +142,11 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
qp = (struct mlx5_core_qp *)common;
qp->event(qp, event_type);
break;
-
+ case MLX5_RES_DCT:
+ dct = (struct mlx5_core_dct *)common;
+ if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
+ complete(&dct->drained);
+ break;
default:
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
}
@@ -142,9 +154,9 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
-static int create_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
+static int create_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp,
+ int rsc_type)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
int err;
@@ -165,8 +177,8 @@ static int create_qprqsq_common(struct mlx5_core_dev *dev,
return 0;
}
-static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
+static void destroy_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
unsigned long flags;
@@ -179,6 +191,40 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
+ u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ init_completion(&dct->drained);
+ MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+ return err;
+ }
+
+ qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ err = create_resource_common(dev, qp, MLX5_RES_DCT);
+ if (err)
+ goto err_cmd;
+
+ return 0;
+err_cmd:
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
+	mlx5_cmd_exec(dev, (void *)&din, sizeof(din),
+		      (void *)&dout, sizeof(dout));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in, int inlen)
@@ -197,7 +243,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
- err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
+ err = create_resource_common(dev, qp, MLX5_RES_QP);
if (err)
goto err_cmd;
@@ -220,6 +266,47 @@ err_cmd:
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
+ MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+}
+
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ goto destroy;
+ } else {
+			mlx5_core_warn(dev, "failed to drain DCT 0x%x, err %d\n", qp->qpn, err);
+ return err;
+ }
+ }
+ wait_for_completion(&dct->drained);
+destroy:
+ destroy_resource_common(dev, &dct->mqp);
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
+
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -229,7 +316,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
mlx5_debug_qp_remove(dev, qp);
- destroy_qprqsq_common(dev, qp);
+ destroy_resource_common(dev, qp);
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
@@ -405,6 +492,20 @@ int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
+ MLX5_SET(query_dct_in, in, dctn, qp->qpn);
+
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)out, outlen);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
+
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
@@ -441,7 +542,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
rq->qpn = rqn;
- err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ err = create_resource_common(dev, rq, MLX5_RES_RQ);
if (err)
goto err_destroy_rq;
@@ -457,7 +558,7 @@ EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq)
{
- destroy_qprqsq_common(dev, rq);
+ destroy_resource_common(dev, rq);
mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
@@ -473,7 +574,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
sq->qpn = sqn;
- err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
goto err_destroy_sq;
@@ -489,7 +590,7 @@ EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq)
{
- destroy_qprqsq_common(dev, sq);
+ destroy_resource_common(dev, sq);
mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
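
Taken together, the new entry points give a DCT a staged teardown: DRAIN_DCT is issued first, the caller sleeps on dct->drained until the DCT_DRAINED event completes it, and only then is DESTROY_DCT sent. A minimal caller sketch, assuming kernel context and eliding construction of the create_dct_in mailbox:

    static int example_dct_cycle(struct mlx5_core_dev *mdev, u32 *in, int inlen)
    {
    	struct mlx5_core_dct dct = {};
    	int err;

    	err = mlx5_core_create_dct(mdev, &dct, in, inlen);
    	if (err)
    		return err;

    	/* ... dct.mqp.qpn is now valid and events are routed ... */

    	/* drains, waits for MLX5_EVENT_TYPE_DCT_DRAINED, then destroys */
    	return mlx5_core_destroy_dct(mdev, &dct);
    }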
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index a1296a62497d..dfe36cf6fbea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,9 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+/* Mutex to hold while enabling or disabling RoCE */
+static DEFINE_MUTEX(mlx5_roce_en_lock);
+
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
@@ -998,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_inc_return(&mdev->roce.roce_en) != 1)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (!mdev->roce.roce_en)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+
+ if (!err)
+ mdev->roce.roce_en++;
+ mutex_unlock(&mlx5_roce_en_lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_dec_return(&mdev->roce.roce_en) != 0)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (mdev->roce.roce_en) {
+ mdev->roce.roce_en--;
+ if (mdev->roce.roce_en == 0)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+
+ if (err)
+ mdev->roce.roce_en++;
+ }
+ mutex_unlock(&mlx5_roce_en_lock);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
@@ -1110,3 +1131,61 @@ ex:
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ err = mlx5_nic_vport_enable_roce(port_mdev);
+ if (err)
+ goto free;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria,
+ MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+free:
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
+
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id, 0);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria, 0);
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (!err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
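
Replacing the atomic counters with a count guarded by mlx5_roce_en_lock closes the race where a concurrent enable and disable could each observe an intermediate count and issue conflicting vport state commands; the command now runs under the same lock that updates the count, and a failed transition is rolled back. A compressed user-space rendering of the pattern (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t en_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int en_count;

    static int update_hw_state(bool enable)
    {
    	(void)enable;			/* stand-in for the vport command */
    	return 0;
    }

    int example_enable(void)
    {
    	int err = 0;

    	pthread_mutex_lock(&en_lock);
    	if (!en_count)
    		err = update_hw_state(true);	/* first user flips hardware */
    	if (!err)
    		en_count++;
    	pthread_mutex_unlock(&en_lock);
    	return err;
    }

    int example_disable(void)
    {
    	int err = 0;

    	pthread_mutex_lock(&en_lock);
    	if (en_count) {
    		en_count--;
    		if (!en_count)
    			err = update_hw_state(false);	/* last user flips back */
    		if (err)
    			en_count++;			/* roll back on failure */
    	}
    	pthread_mutex_unlock(&en_lock);
    	return err;
    }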
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a42433fb6949..b922ab5cedea 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -311,7 +311,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
int sr;
- int commslot = 0;
+ bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM;
if (!MACH_IS_MAC)
return -ENODEV;
@@ -322,10 +322,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
Ethernet (BTW, the Ethernet *is* always at the same
address, and nothing else lives there, at least if Apple's
documentation is to be believed) */
- if (macintosh_config->ident == MAC_MODEL_Q630 ||
- macintosh_config->ident == MAC_MODEL_P588 ||
- macintosh_config->ident == MAC_MODEL_P575 ||
- macintosh_config->ident == MAC_MODEL_C610) {
+ if (commslot || macintosh_config->ident == MAC_MODEL_C610) {
int card_present;
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
@@ -333,7 +330,6 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
printk("none.\n");
return -ENODEV;
}
- commslot = 1;
}
printk("yes\n");
@@ -428,26 +424,26 @@ static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
return 0;
}
-static int macsonic_ident(struct nubus_dev *ndev)
+static int macsonic_ident(struct nubus_rsrc *fres)
{
- if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
- ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
+ if (fres->dr_hw == NUBUS_DRHW_ASANTE_LC &&
+ fres->dr_sw == NUBUS_DRSW_SONIC_LC)
return MACSONIC_DAYNALINK;
- if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
- ndev->dr_sw == NUBUS_DRSW_APPLE) {
+ if (fres->dr_hw == NUBUS_DRHW_SONIC &&
+ fres->dr_sw == NUBUS_DRSW_APPLE) {
/* There has to be a better way to do this... */
- if (strstr(ndev->board->name, "DuoDock"))
+ if (strstr(fres->board->name, "DuoDock"))
return MACSONIC_DUODOCK;
else
return MACSONIC_APPLE;
}
- if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
- ndev->dr_sw == NUBUS_DRSW_DAYNA)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 &&
+ fres->dr_sw == NUBUS_DRSW_DAYNA)
return MACSONIC_DAYNA;
- if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
- ndev->dr_sw == 0) { /* huh? */
+ if (fres->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
+ fres->dr_sw == 0) { /* huh? */
return MACSONIC_APPLE16;
}
return -1;
@@ -456,7 +452,7 @@ static int macsonic_ident(struct nubus_dev *ndev)
static int mac_nubus_sonic_probe(struct net_device *dev)
{
static int slots;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
struct sonic_local* lp = netdev_priv(dev);
unsigned long base_addr, prom_addr;
u16 sonic_dcr;
@@ -464,9 +460,11 @@ static int mac_nubus_sonic_probe(struct net_device *dev)
int reg_offset, dma_bitmode;
/* Find the first SONIC that hasn't been initialized already */
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
- NUBUS_TYPE_ETHERNET, ndev)) != NULL)
- {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1<<ndev->board->slot))
continue;
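
Both mac89x0 and macsonic now iterate functional resources with for_each_func_rsrc() instead of the removed nubus_find_*() helpers. An open-coded equivalent built on the iterator pair exported later in this series (kernel context; the helper name is illustrative):

    static struct nubus_rsrc *find_ethernet_rsrc(void)
    {
    	struct nubus_rsrc *fres;

    	for (fres = nubus_first_rsrc_or_null(); fres;
    	     fres = nubus_next_rsrc_or_null(fres))
    		if (fres->category == NUBUS_CAT_NETWORK &&
    		    fres->type == NUBUS_TYPE_ETHERNET)
    			return fres;
    	return NULL;
    }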
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3e57bf5d3d03..1673fc90027f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -97,9 +97,7 @@ static int __qed_spq_block(struct qed_hwfn *p_hwfn,
while (iter_cnt--) {
/* Validate we receive completion update */
- if (READ_ONCE(comp_done->done) == 1) {
- /* Read updated FW return value */
- smp_read_barrier_depends();
+		if (smp_load_acquire(&comp_done->done) == 1) { /* pairs with release store */
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
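
The smp_load_acquire() guarantees that the read of fw_return_code below it observes the value written before the completion side's releasing store, which the removed smp_read_barrier_depends() only guaranteed for dependent loads. A two-thread sketch of the pairing in C11 atomics (names are illustrative):

    #include <stdatomic.h>

    struct comp {
    	atomic_int done;
    	int fw_return_code;
    };

    /* completion side (e.g. the async callback) */
    void comp_signal(struct comp *c, int code)
    {
    	c->fw_return_code = code;
    	/* publish: the write above is visible once done == 1 is seen */
    	atomic_store_explicit(&c->done, 1, memory_order_release);
    }

    /* polling side */
    int comp_poll(struct comp *c, int *code)
    {
    	if (atomic_load_explicit(&c->done, memory_order_acquire) == 1) {
    		*code = c->fw_return_code;	/* ordered after the acquire */
    		return 0;
    	}
    	return -1;
    }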
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 7900ed066d8a..e412dfdda7dd 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2638,12 +2638,12 @@ static long ca8210_test_int_ioctl(
*
* Return: set of poll return flags
*/
-static unsigned int ca8210_test_int_poll(
+static __poll_t ca8210_test_int_poll(
struct file *filp,
struct poll_table_struct *ptable
)
{
- unsigned int return_flags = 0;
+ __poll_t return_flags = 0;
struct ca8210_priv *priv = filp->private_data;
poll_wait(filp, &priv->test.readq, ptable);
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 1b28e6e702f5..bdc4d23627c5 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -334,7 +334,7 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 264d4af0bf69..ef6b2126b23a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -531,10 +531,10 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int ppp_poll(struct file *file, poll_table *wait)
+static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
struct ppp_file *pf = file->private_data;
- unsigned int mask;
+ __poll_t mask;
if (!pf)
return 0;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 7196f00f0991..047f6c68a441 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -327,7 +327,7 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 77872699c45d..0a5ed004781c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -569,10 +569,10 @@ static int tap_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int tap_poll(struct file *file, poll_table *wait)
+static __poll_t tap_poll(struct file *file, poll_table *wait)
{
struct tap_queue *q = file->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (!q)
goto out;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a0c5cb1a1617..0dc66e4fbb2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1428,12 +1428,12 @@ static void tun_net_init(struct net_device *dev)
/* Character device part */
/* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
+static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
struct sock *sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!tun)
return POLLERR;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 6ea16260ec76..f6b000ddcd15 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -924,7 +924,7 @@ static int chrdev_tx_done(struct channel_data *chan, int size)
return 1;
}
-static unsigned int cosa_poll(struct file *file, poll_table *poll)
+static __poll_t cosa_poll(struct file *file, poll_table *poll)
{
pr_info("cosa_poll is here\n");
return 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index f4fdad2ed319..72c55d1f8903 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -301,7 +301,7 @@ exit:
return status;
}
-static unsigned int rt2x00debug_poll_queue_dump(struct file *file,
+static __poll_t rt2x00debug_poll_queue_dump(struct file *file,
poll_table *wait)
{
struct rt2x00debug_intf *intf = file->private_data;
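
These poll-handler conversions are behaviour-neutral: __poll_t is a bitwise-annotated type that lets sparse catch poll masks being mixed with plain integers. A minimal handler in the new style (kernel context; example_priv and its queues are placeholders):

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
    	struct example_priv *priv = file->private_data;
    	__poll_t mask = 0;

    	poll_wait(file, &priv->readq, wait);
    	if (!skb_queue_empty(&priv->rxq))
    		mask |= POLLIN | POLLRDNORM;	/* data ready to read */
    	return mask;
    }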
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile
index 21bda2031e7e..6d063cde39d1 100644
--- a/drivers/nubus/Makefile
+++ b/drivers/nubus/Makefile
@@ -2,6 +2,6 @@
# Makefile for the nubus specific drivers.
#
-obj-y := nubus.o
+obj-y := nubus.o bus.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
new file mode 100644
index 000000000000..d306c348c857
--- /dev/null
+++ b/drivers/nubus/bus.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Bus implementation for the NuBus subsystem.
+//
+// Copyright (C) 2017 Finn Thain
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/nubus.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#define to_nubus_board(d) container_of(d, struct nubus_board, dev)
+#define to_nubus_driver(d) container_of(d, struct nubus_driver, driver)
+
+static int nubus_bus_match(struct device *dev, struct device_driver *driver)
+{
+ return 1;
+}
+
+static int nubus_device_probe(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (ndrv->probe)
+ err = ndrv->probe(to_nubus_board(dev));
+ return err;
+}
+
+static int nubus_device_remove(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (dev->driver && ndrv->remove)
+ err = ndrv->remove(to_nubus_board(dev));
+ return err;
+}
+
+struct bus_type nubus_bus_type = {
+ .name = "nubus",
+ .match = nubus_bus_match,
+ .probe = nubus_device_probe,
+ .remove = nubus_device_remove,
+};
+EXPORT_SYMBOL(nubus_bus_type);
+
+int nubus_driver_register(struct nubus_driver *ndrv)
+{
+ ndrv->driver.bus = &nubus_bus_type;
+ return driver_register(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_register);
+
+void nubus_driver_unregister(struct nubus_driver *ndrv)
+{
+ driver_unregister(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_unregister);
+
+static struct device nubus_parent = {
+ .init_name = "nubus",
+};
+
+int __init nubus_bus_register(void)
+{
+ int err;
+
+ err = device_register(&nubus_parent);
+ if (err)
+ return err;
+
+ err = bus_register(&nubus_bus_type);
+ if (!err)
+ return 0;
+
+ device_unregister(&nubus_parent);
+ return err;
+}
+
+static void nubus_device_release(struct device *dev)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct nubus_rsrc *fres, *tmp;
+
+ list_for_each_entry_safe(fres, tmp, &nubus_func_rsrcs, list)
+ if (fres->board == board) {
+ list_del(&fres->list);
+ kfree(fres);
+ }
+ kfree(board);
+}
+
+int nubus_device_register(struct nubus_board *board)
+{
+ board->dev.parent = &nubus_parent;
+ board->dev.release = nubus_device_release;
+ board->dev.bus = &nubus_bus_type;
+ dev_set_name(&board->dev, "slot.%X", board->slot);
+ return device_register(&board->dev);
+}
+
+static int nubus_print_device_name_fn(struct device *dev, void *data)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct seq_file *m = data;
+
+ seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ return 0;
+}
+
+int nubus_proc_show(struct seq_file *m, void *data)
+{
+ return bus_for_each_dev(&nubus_bus_type, NULL, m,
+ nubus_print_device_name_fn);
+}
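
With the bus registered, a client driver binds by embedding a struct device_driver and routing it through nubus_driver_register(). A registration sketch, assuming the struct nubus_driver layout implied by bus.c above (the example_* names are placeholders):

    static int example_probe(struct nubus_board *board)
    {
    	dev_info(&board->dev, "bound to slot %X\n", board->slot);
    	return 0;
    }

    static int example_remove(struct nubus_board *board)
    {
    	return 0;
    }

    static struct nubus_driver example_driver = {
    	.probe	= example_probe,
    	.remove	= example_remove,
    	.driver	= {
    		.name	= "example_nubus",
    	},
    };

    module_driver(example_driver, nubus_driver_register,
    	      nubus_driver_unregister);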
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index b793727cd4f7..4621ff98138c 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -31,8 +32,7 @@
/* Globals */
-struct nubus_dev *nubus_devices;
-struct nubus_board *nubus_boards;
+LIST_HEAD(nubus_func_rsrcs);
/* Meaning of "bytelanes":
@@ -146,7 +146,7 @@ static inline void *nubus_rom_addr(int slot)
return (void *)(0xF1000000 + (slot << 24));
}
-static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
{
unsigned char *p = nd->base;
@@ -161,7 +161,7 @@ static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
pointed to with offsets) out of the card ROM. */
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
- int len)
+ unsigned int len)
{
unsigned char *t = (unsigned char *)dest;
unsigned char *p = nubus_dirptr(dirent);
@@ -173,21 +173,49 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
}
EXPORT_SYMBOL(nubus_get_rsrc_mem);
-void nubus_get_rsrc_str(void *dest, const struct nubus_dirent *dirent,
- int len)
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len)
{
- unsigned char *t = (unsigned char *)dest;
+ char *t = dest;
unsigned char *p = nubus_dirptr(dirent);
- while (len) {
- *t = nubus_get_rom(&p, 1, dirent->mask);
- if (!*t++)
+ while (len > 1) {
+ unsigned char c = nubus_get_rom(&p, 1, dirent->mask);
+
+ if (!c)
break;
+ *t++ = c;
len--;
}
+ if (len > 0)
+ *t = '\0';
+ return t - dest;
}
EXPORT_SYMBOL(nubus_get_rsrc_str);
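
The rewritten nubus_get_rsrc_str() copies at most len - 1 bytes, NUL-terminates whenever the buffer is non-empty, and returns the number of bytes actually copied so callers can size the matching /proc records. A user-space rendering of the same contract, minus the ROM byte-lane decoding (helper name is illustrative):

    #include <assert.h>
    #include <stddef.h>

    static size_t copy_rsrc_str(char *dest, const char *src, size_t len)
    {
    	char *t = dest;

    	while (len > 1 && *src) {	/* leave room for the terminator */
    		*t++ = *src++;
    		len--;
    	}
    	if (len > 0)
    		*t = '\0';
    	return t - dest;
    }

    int main(void)
    {
    	char buf[8];

    	assert(copy_rsrc_str(buf, "DuoDock", sizeof(buf)) == 7);
    	assert(copy_rsrc_str(buf, "Macintosh", sizeof(buf)) == 7);
    	assert(buf[7] == '\0');
    	return 0;
    }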
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len)
+{
+ unsigned long buf[32];
+ unsigned int buf_size = sizeof(buf);
+ unsigned char *p = nubus_dirptr(dirent);
+
+ /* If possible, write out full buffers */
+ while (len >= buf_size) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ buf[i] = nubus_get_rom(&p, sizeof(buf[0]),
+ dirent->mask);
+ seq_write(m, buf, buf_size);
+ len -= buf_size;
+ }
+ /* If not, write out individual bytes */
+ while (len--)
+ seq_putc(m, nubus_get_rom(&p, 1, dirent->mask));
+}
+
int nubus_get_root_dir(const struct nubus_board *board,
struct nubus_dir *dir)
{
@@ -199,12 +227,11 @@ int nubus_get_root_dir(const struct nubus_board *board,
EXPORT_SYMBOL(nubus_get_root_dir);
/* This is a slyly renamed version of the above */
-int nubus_get_func_dir(const struct nubus_dev *dev,
- struct nubus_dir *dir)
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir)
{
- dir->ptr = dir->base = dev->directory;
+ dir->ptr = dir->base = fres->directory;
dir->done = 0;
- dir->mask = dev->board->lanes;
+ dir->mask = fres->board->lanes;
return 0;
}
EXPORT_SYMBOL(nubus_get_func_dir);
@@ -277,51 +304,20 @@ EXPORT_SYMBOL(nubus_rewinddir);
/* Driver interface functions, more or less like in pci.c */
-struct nubus_dev*
-nubus_find_device(unsigned short category, unsigned short type,
- unsigned short dr_hw, unsigned short dr_sw,
- const struct nubus_dev *from)
-{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type &&
- itor->dr_hw == dr_hw && itor->dr_sw == dr_sw)
- return itor;
- itor = itor->next;
- }
- return NULL;
-}
-EXPORT_SYMBOL(nubus_find_device);
-
-struct nubus_dev*
-nubus_find_type(unsigned short category, unsigned short type,
- const struct nubus_dev *from)
+struct nubus_rsrc *nubus_first_rsrc_or_null(void)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ return list_first_entry_or_null(&nubus_func_rsrcs, struct nubus_rsrc,
+ list);
}
-EXPORT_SYMBOL(nubus_find_type);
+EXPORT_SYMBOL(nubus_first_rsrc_or_null);
-struct nubus_dev*
-nubus_find_slot(unsigned int slot, const struct nubus_dev *from)
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->board->slot == slot)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ if (list_is_last(&from->list, &nubus_func_rsrcs))
+ return NULL;
+ return list_next_entry(from, list);
}
-EXPORT_SYMBOL(nubus_find_slot);
+EXPORT_SYMBOL(nubus_next_rsrc_or_null);
int
nubus_find_rsrc(struct nubus_dir *dir, unsigned char rsrc_type,
@@ -339,31 +335,83 @@ EXPORT_SYMBOL(nubus_find_rsrc);
looking at, and print out lots and lots of information from the
resource blocks. */
-/* FIXME: A lot of this stuff will eventually be useful after
- initialization, for intelligently probing Ethernet and video chips,
- among other things. The rest of it should go in the /proc code.
- For now, we just use it to give verbose boot logs. */
+static int __init nubus_get_block_rsrc_dir(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
-static int __init nubus_show_display_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+ while (nubus_readdir(&dir, &ent) != -1) {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type, size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_vidmode(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
+
+ while (nubus_readdir(&dir, &ent) != -1) {
+ switch (ent.type) {
+ case 1: /* mVidParams */
+ case 2: /* mTable */
+ {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type,
+ size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ break;
+ }
+ default:
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
+ }
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_GAMMADIR:
- pr_info(" gamma directory offset: 0x%06x\n", ent->data);
+ pr_debug(" gamma directory offset: 0x%06x\n", ent->data);
+ nubus_get_block_rsrc_dir(fres->board, procdir, ent);
break;
case 0x0080 ... 0x0085:
- pr_info(" mode %02X info offset: 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" mode 0x%02x info offset: 0x%06x\n",
+ ent->type, ent->data);
+ nubus_get_display_vidmode(fres->board, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_network_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_network_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MAC_ADDRESS:
@@ -371,18 +419,21 @@ static int __init nubus_show_network_resource(struct nubus_dev *dev,
char addr[6];
nubus_get_rsrc_mem(addr, ent, 6);
- pr_info(" MAC address: %pM\n", addr);
+ pr_debug(" MAC address: %pM\n", addr);
+ nubus_proc_add_rsrc_mem(procdir, ent, 6);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_cpu_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MEMINFO:
@@ -390,8 +441,9 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long meminfo[2];
nubus_get_rsrc_mem(&meminfo, ent, 8);
- pr_info(" memory: [ 0x%08lx 0x%08lx ]\n",
- meminfo[0], meminfo[1]);
+ pr_debug(" memory: [ 0x%08lx 0x%08lx ]\n",
+ meminfo[0], meminfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
case NUBUS_RESID_ROMINFO:
@@ -399,57 +451,60 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long rominfo[2];
nubus_get_rsrc_mem(&rominfo, ent, 8);
- pr_info(" ROM: [ 0x%08lx 0x%08lx ]\n",
- rominfo[0], rominfo[1]);
+ pr_debug(" ROM: [ 0x%08lx 0x%08lx ]\n",
+ rominfo[0], rominfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_private_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_private_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
- switch (dev->category) {
+ switch (fres->category) {
case NUBUS_CAT_DISPLAY:
- nubus_show_display_resource(dev, ent);
+ nubus_get_display_resource(fres, procdir, ent);
break;
case NUBUS_CAT_NETWORK:
- nubus_show_network_resource(dev, ent);
+ nubus_get_network_resource(fres, procdir, ent);
break;
case NUBUS_CAT_CPU:
- nubus_show_cpu_resource(dev, ent);
+ nubus_get_cpu_resource(fres, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static struct nubus_dev * __init
+static struct nubus_rsrc * __init
nubus_get_functional_resource(struct nubus_board *board, int slot,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
struct nubus_dirent ent;
- struct nubus_dev *dev;
+ struct nubus_rsrc *fres;
- pr_info(" Function 0x%02x:\n", parent->type);
+ pr_debug(" Functional resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
-
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
/* Actually we should probably panic if this fails */
- if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
+ fres = kzalloc(sizeof(*fres), GFP_ATOMIC);
+ if (!fres)
return NULL;
- dev->resid = parent->type;
- dev->directory = dir.base;
- dev->board = board;
+ fres->resid = parent->type;
+ fres->directory = dir.base;
+ fres->board = board;
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -458,130 +513,96 @@ nubus_get_functional_resource(struct nubus_board *board, int slot,
unsigned short nbtdata[4];
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- dev->category = nbtdata[0];
- dev->type = nbtdata[1];
- dev->dr_sw = nbtdata[2];
- dev->dr_hw = nbtdata[3];
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ fres->category = nbtdata[0];
+ fres->type = nbtdata[1];
+ fres->dr_sw = nbtdata[2];
+ fres->dr_hw = nbtdata[3];
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
{
- nubus_get_rsrc_str(dev->name, &ent, 64);
- pr_info(" name: %s\n", dev->name);
+ char name[64];
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ pr_debug(" name: %s\n", name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
}
case NUBUS_RESID_DRVRDIR:
{
/* MacOS driver. If we were NetBSD we might
use this :-) */
- struct nubus_dir drvr_dir;
- struct nubus_dirent drvr_ent;
-
- nubus_get_subdir(&ent, &drvr_dir);
- nubus_readdir(&drvr_dir, &drvr_ent);
- dev->driver = nubus_dirptr(&drvr_ent);
- pr_info(" driver at: 0x%p\n", dev->driver);
+ pr_debug(" driver directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
}
case NUBUS_RESID_MINOR_BASEOS:
+ {
/* We will need this in order to support
multiple framebuffers. It might be handy
for Ethernet as well */
- nubus_get_rsrc_mem(&dev->iobase, &ent, 4);
- pr_info(" memory offset: 0x%08lx\n", dev->iobase);
+ u32 base_offset;
+
+ nubus_get_rsrc_mem(&base_offset, &ent, 4);
+ pr_debug(" memory offset: 0x%08x\n", base_offset);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_MINOR_LENGTH:
+ {
/* Ditto */
- nubus_get_rsrc_mem(&dev->iosize, &ent, 4);
- pr_info(" memory length: 0x%08lx\n", dev->iosize);
+ u32 length;
+
+ nubus_get_rsrc_mem(&length, &ent, 4);
+ pr_debug(" memory length: 0x%08x\n", length);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_FLAGS:
- dev->flags = ent.data;
- pr_info(" flags: 0x%06x\n", dev->flags);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- dev->hwdevid = ent.data;
- pr_info(" hwdevid: 0x%06x\n", dev->hwdevid);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
/* Local/Private resources have their own
function */
- nubus_show_private_resource(dev, &ent);
+ nubus_get_private_resource(fres, dir.procdir, &ent);
}
}
- return dev;
-}
-
-/* This is cool. */
-static int __init nubus_get_vidnames(struct nubus_board *board,
- const struct nubus_dirent *parent)
-{
- struct nubus_dir dir;
- struct nubus_dirent ent;
-
- /* FIXME: obviously we want to put this in a header file soon */
- struct vidmode {
- u32 size;
- /* Don't know what this is yet */
- u16 id;
- /* Longest one I've seen so far is 26 characters */
- char name[32];
- };
-
- pr_info(" video modes supported:\n");
- nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
-
- while (nubus_readdir(&dir, &ent) != -1) {
- struct vidmode mode;
- u32 size;
-
- /* First get the length */
- nubus_get_rsrc_mem(&size, &ent, 4);
-
- /* Now clobber the whole thing */
- if (size > sizeof(mode) - 1)
- size = sizeof(mode) - 1;
- memset(&mode, 0, sizeof(mode));
- nubus_get_rsrc_mem(&mode, &ent, size);
- pr_info(" %02X: (%02X) %s\n", ent.type,
- mode.id, mode.name);
- }
- return 0;
+ return fres;
}
/* This is *really* cool. */
static int __init nubus_get_icon(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *ent)
{
/* Should be 32x32 if my memory serves me correctly */
- unsigned char icon[128];
- int x, y;
+ u32 icon[32];
+ int i;
nubus_get_rsrc_mem(&icon, ent, 128);
- pr_info(" icon:\n");
-
- /* We should actually plot these somewhere in the framebuffer
- init. This is just to demonstrate that they do, in fact,
- exist */
- for (y = 0; y < 32; y++) {
- pr_info(" ");
- for (x = 0; x < 32; x++) {
- if (icon[y * 4 + x / 8] & (0x80 >> (x % 8)))
- pr_cont("*");
- else
- pr_cont(" ");
- }
- pr_cont("\n");
- }
+ pr_debug(" icon:\n");
+ for (i = 0; i < 8; i++)
+ pr_debug(" %08x %08x %08x %08x\n",
+ icon[i * 4 + 0], icon[i * 4 + 1],
+ icon[i * 4 + 2], icon[i * 4 + 3]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 128);
+
return 0;
}
static int __init nubus_get_vendorinfo(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
@@ -589,19 +610,20 @@ static int __init nubus_get_vendorinfo(struct nubus_board *board,
static char *vendor_fields[6] = { "ID", "serial", "revision",
"part", "date", "unknown field" };
- pr_info(" vendor info:\n");
+ pr_debug(" vendor info:\n");
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
char name[64];
+ unsigned int len;
/* These are all strings, we think */
- nubus_get_rsrc_str(name, &ent, 64);
- if (ent.type > 5)
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ if (ent.type < 1 || ent.type > 5)
ent.type = 5;
- pr_info(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ pr_debug(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
}
return 0;
}
@@ -612,9 +634,9 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
struct nubus_dir dir;
struct nubus_dirent ent;
+ pr_debug(" Board resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -625,64 +647,81 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
useful except insofar as it tells us that
we really are looking at a board resource. */
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
if (nbtdata[0] != 1 || nbtdata[1] != 0 ||
nbtdata[2] != 0 || nbtdata[3] != 0)
- pr_err("this sResource is not a board resource!\n");
+ pr_err("Slot %X: sResource is not a board resource!\n",
+ slot);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
- nubus_get_rsrc_str(board->name, &ent, 64);
- pr_info(" name: %s\n", board->name);
+ {
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(board->name, &ent,
+ sizeof(board->name));
+ pr_debug(" name: %s\n", board->name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
+ }
case NUBUS_RESID_ICON:
- nubus_get_icon(board, &ent);
+ nubus_get_icon(board, dir.procdir, &ent);
break;
case NUBUS_RESID_BOARDID:
- pr_info(" board id: 0x%x\n", ent.data);
+ pr_debug(" board id: 0x%x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_PRIMARYINIT:
- pr_info(" primary init offset: 0x%06x\n", ent.data);
+ pr_debug(" primary init offset: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_VENDORINFO:
- nubus_get_vendorinfo(board, &ent);
+ nubus_get_vendorinfo(board, dir.procdir, &ent);
break;
case NUBUS_RESID_FLAGS:
- pr_info(" flags: 0x%06x\n", ent.data);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- pr_info(" hwdevid: 0x%06x\n", ent.data);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_SECONDINIT:
- pr_info(" secondary init offset: 0x%06x\n", ent.data);
+ pr_debug(" secondary init offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
/* WTF isn't this in the functional resources? */
case NUBUS_RESID_VIDNAMES:
- nubus_get_vidnames(board, &ent);
+ pr_debug(" vidnames directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
/* Same goes for this */
case NUBUS_RESID_VIDMODES:
- pr_info(" video mode parameter directory offset: 0x%06x\n",
- ent.data);
+ pr_debug(" video mode parameter directory offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent.type, ent.data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
}
}
return 0;
}
-/* Add a board (might be many devices) to the list */
-static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
+static void __init nubus_add_board(int slot, int bytelanes)
{
struct nubus_board *board;
- struct nubus_board **boardp;
unsigned char *rp;
unsigned long dpat;
struct nubus_dir dir;
struct nubus_dirent ent;
+ int prev_resid = -1;
/* Move to the start of the format block */
rp = nubus_rom_addr(slot);
@@ -690,19 +729,19 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Actually we should probably panic if this fails */
if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
- return NULL;
+ return;
board->fblock = rp;
/* Dump the format block for debugging purposes */
pr_debug("Slot %X, format block at 0x%p:\n", slot, rp);
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp = board->fblock;
board->slot = slot;
@@ -722,10 +761,10 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Directory offset should be small and negative... */
if (!(board->doffset & 0x00FF0000))
- pr_warn("Dodgy doffset!\n");
+ pr_warn("Slot %X: Dodgy doffset!\n", slot);
dpat = nubus_get_rom(&rp, 4, bytelanes);
if (dpat != NUBUS_TEST_PATTERN)
- pr_warn("Wrong test pattern %08lx!\n", dpat);
+ pr_warn("Slot %X: Wrong test pattern %08lx!\n", slot, dpat);
/*
* I wonder how the CRC is meant to work -
@@ -742,53 +781,52 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
nubus_get_root_dir(board, &dir);
/* We're ready to rock */
- pr_info("Slot %X:\n", slot);
+ pr_debug("Slot %X resources:\n", slot);
/* Each slot should have one board resource and any number of
- functional resources. So we'll fill in some fields in the
- struct nubus_board from the board resource, then walk down
- the list of functional resources, spinning out a nubus_dev
- for each of them. */
+ * functional resources. So we'll fill in some fields in the
+ * struct nubus_board from the board resource, then walk down
+ * the list of functional resources, spinning out a nubus_rsrc
+ * for each of them.
+ */
if (nubus_readdir(&dir, &ent) == -1) {
/* We can't have this! */
- pr_err("Board resource not found!\n");
- return NULL;
- } else {
- pr_info(" Board resource:\n");
- nubus_get_board_resource(board, slot, &ent);
+ pr_err("Slot %X: Board resource not found!\n", slot);
+ kfree(board);
+ return;
}
+ if (ent.type < 1 || ent.type > 127)
+ pr_warn("Slot %X: Board resource ID is invalid!\n", slot);
+
+ board->procdir = nubus_proc_add_board(board);
+
+ nubus_get_board_resource(board, slot, &ent);
+
while (nubus_readdir(&dir, &ent) != -1) {
- struct nubus_dev *dev;
- struct nubus_dev **devp;
+ struct nubus_rsrc *fres;
- dev = nubus_get_functional_resource(board, slot, &ent);
- if (dev == NULL)
+ fres = nubus_get_functional_resource(board, slot, &ent);
+ if (fres == NULL)
continue;
- /* We zeroed this out above */
- if (board->first_dev == NULL)
- board->first_dev = dev;
+ /* Resources should appear in ascending ID order. This sanity
+ * check prevents duplicate resource IDs.
+ */
+ if (fres->resid <= prev_resid) {
+ kfree(fres);
+ continue;
+ }
+ prev_resid = fres->resid;
- /* Put it on the global NuBus device chain. Keep entries in order. */
- for (devp = &nubus_devices; *devp != NULL;
- devp = &((*devp)->next))
- /* spin */;
- *devp = dev;
- dev->next = NULL;
+ list_add_tail(&fres->list, &nubus_func_rsrcs);
}
- /* Put it on the global NuBus board chain. Keep entries in order. */
- for (boardp = &nubus_boards; *boardp != NULL;
- boardp = &((*boardp)->next))
- /* spin */;
- *boardp = board;
- board->next = NULL;
-
- return board;
+ if (nubus_device_register(board))
+ put_device(&board->dev);
}
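
The registration error path above relies on a standard driver-model rule: once device registration has been attempted, the board must be released through put_device(), so the release callback frees the memory exactly once; a direct kfree() would leak or double-free. A minimal sketch of the idiom, with hypothetical names standing in for the nubus helpers:

    /* Sketch only; example_board_release() is assumed to kfree() the board. */
    static void example_add_board(struct example_board *board)
    {
            board->dev.release = example_board_release;
            if (device_register(&board->dev))
                    put_device(&board->dev);        /* never kfree() here */
    }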
-void __init nubus_probe_slot(int slot)
+static void __init nubus_probe_slot(int slot)
{
unsigned char dp;
unsigned char *rp;
@@ -796,11 +834,8 @@ void __init nubus_probe_slot(int slot)
rp = nubus_rom_addr(slot);
for (i = 4; i; i--) {
- int card_present;
-
rp--;
- card_present = hwreg_present(rp);
- if (!card_present)
+ if (!hwreg_present(rp))
continue;
dp = *rp;
@@ -822,10 +857,11 @@ void __init nubus_probe_slot(int slot)
}
}
-void __init nubus_scan_bus(void)
+static void __init nubus_scan_bus(void)
{
int slot;
+ pr_info("NuBus: Scanning NuBus slots.\n");
for (slot = 9; slot < 15; slot++) {
nubus_probe_slot(slot);
}
@@ -833,14 +869,16 @@ void __init nubus_scan_bus(void)
static int __init nubus_init(void)
{
+ int err;
+
if (!MACH_IS_MAC)
return 0;
- pr_info("NuBus: Scanning NuBus slots.\n");
- nubus_devices = NULL;
- nubus_boards = NULL;
- nubus_scan_bus();
nubus_proc_init();
+ err = nubus_bus_register();
+ if (err)
+ return err;
+ nubus_scan_bus();
return 0;
}
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 004a122ac0ff..c2e5a7e6bd3e 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -11,39 +11,37 @@
structure in /proc analogous to the structure of the NuBus ROM
resources.
- Therefore each NuBus device is in fact a directory, which may in
- turn contain subdirectories. The "files" correspond to NuBus
- resource records. For those types of records which we know how to
- convert to formats that are meaningful to userspace (mostly just
- icons) these files will provide "cooked" data. Otherwise they will
- simply provide raw access (read-only of course) to the ROM. */
+ Therefore each board function gets a directory, which may in turn
+ contain subdirectories. Each slot resource is a file. Unrecognized
+ resources are empty files, since every resource ID requires a special
+ case (e.g. if the resource ID implies a directory or block, then its
+ value has to be interpreted as a slot ROM pointer etc.).
+ */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/nubus.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
-
#include <linux/uaccess.h>
#include <asm/byteorder.h>
+/*
+ * /proc/bus/nubus/devices stuff
+ */
+
static int
nubus_devices_proc_show(struct seq_file *m, void *v)
{
- struct nubus_dev *dev = nubus_devices;
-
- while (dev) {
- seq_printf(m, "%x\t%04x %04x %04x %04x",
- dev->board->slot,
- dev->category,
- dev->type,
- dev->dr_sw,
- dev->dr_hw);
- seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
- dev = dev->next;
- }
+ struct nubus_rsrc *fres;
+
+ for_each_func_rsrc(fres)
+ seq_printf(m, "%x\t%04x %04x %04x %04x\t%08lx\n",
+ fres->board->slot, fres->category, fres->type,
+ fres->dr_sw, fres->dr_hw, fres->board->slot_addr);
return 0;
}
@@ -61,174 +59,163 @@ static const struct file_operations nubus_devices_proc_fops = {
static struct proc_dir_entry *proc_bus_nubus_dir;
-static const struct file_operations nubus_proc_subdir_fops = {
-#warning Need to set some I/O handlers here
-};
+/*
+ * /proc/bus/nubus/x/ stuff
+ */
-static void nubus_proc_subdir(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* dir)
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* Some of these are directories, others aren't */
- while (nubus_readdir(dir, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
-
- sprintf(name, "%x", ent.type);
- e = proc_create(name, S_IFREG | S_IRUGO | S_IWUSR, parent,
- &nubus_proc_subdir_fops);
- if (!e)
- return;
- }
+ char name[2];
+
+ if (!proc_bus_nubus_dir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", board->slot);
+ return proc_mkdir(name, proc_bus_nubus_dir);
}
-/* Can't do this recursively since the root directory is structured
- somewhat differently from the subdirectories */
-static void nubus_proc_populate(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* root)
+/* The PDE private data for any directory under /proc/bus/nubus/x/
+ * is the bytelanes value for the board in slot x.
+ */
+
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* We know these are all directories (board resource + one or
- more functional resources) */
- while (nubus_readdir(root, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
- struct nubus_dir dir;
-
- sprintf(name, "%x", ent.type);
- e = proc_mkdir(name, parent);
- if (!e) return;
-
- /* And descend */
- if (nubus_get_subdir(&ent, &dir) == -1) {
- /* This shouldn't happen */
- printk(KERN_ERR "NuBus root directory node %x:%x has no subdir!\n",
- dev->board->slot, ent.type);
- continue;
- } else {
- nubus_proc_subdir(dev, e, &dir);
- }
- }
+ char name[9];
+ int lanes = board->lanes;
+
+ if (!procdir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", ent->type);
+ return proc_mkdir_data(name, 0555, procdir, (void *)lanes);
}
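
Storing the bytelanes value directly in the directory's PDE private data (rather than allocating a structure for it) works because it is a small non-zero integer that fits in a pointer; nubus_proc_rsrc_show() below casts it straight back. The round-trip in isolation, using a portable intermediate cast (the patch itself casts int to void * directly, which is fine on 32-bit m68k):

    int lanes = 0x5;                            /* assumed example bytelanes value */
    void *pde_priv = (void *)(long)lanes;       /* stash in the PDE */
    int back = (int)(long)pde_priv;             /* back == 0x5 */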
-int nubus_proc_attach_device(struct nubus_dev *dev)
+/* The PDE private data for a file under /proc/bus/nubus/x/ is a pointer to
+ * an instance of the following structure, which gives the location and size
+ * of the resource data in the slot ROM. For slot resources which hold only a
+ * small integer, this integer value is stored directly and size is set to 0.
+ * A NULL private data pointer indicates an unrecognized resource.
+ */
+
+struct nubus_proc_pde_data {
+ unsigned char *res_ptr;
+ unsigned int res_size;
+};
+
+static struct nubus_proc_pde_data *
+nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
{
- struct proc_dir_entry *e;
- struct nubus_dir root;
- char name[8];
-
- if (dev == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- return -1;
- }
-
- if (dev->board == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- printk("dev = %p, dev->board = %p\n", dev, dev->board);
- return -1;
- }
-
- /* Create a directory */
- sprintf(name, "%x", dev->board->slot);
- e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir);
- if (!e)
- return -ENOMEM;
+ struct nubus_proc_pde_data *pde_data;
- /* Now recursively populate it with files */
- nubus_get_root_dir(dev->board, &root);
- nubus_proc_populate(dev, e, &root);
+ pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
+ if (!pde_data)
+ return NULL;
- return 0;
+ pde_data->res_ptr = ptr;
+ pde_data->res_size = size;
+ return pde_data;
}
-EXPORT_SYMBOL(nubus_proc_attach_device);
-/*
- * /proc/nubus stuff
- */
-static int nubus_proc_show(struct seq_file *m, void *v)
+static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
{
- const struct nubus_board *board = v;
+ struct inode *inode = m->private;
+ struct nubus_proc_pde_data *pde_data;
- /* Display header on line 1 */
- if (v == SEQ_START_TOKEN)
- seq_puts(m, "Nubus devices found:\n");
- else
- seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ pde_data = PDE_DATA(inode);
+ if (!pde_data)
+ return 0;
+
+ if (pde_data->res_size > m->size)
+ return -EFBIG;
+
+ if (pde_data->res_size) {
+ int lanes = (int)proc_get_parent_data(inode);
+ struct nubus_dirent ent;
+
+ if (!lanes)
+ return 0;
+
+ ent.mask = lanes;
+ ent.base = pde_data->res_ptr;
+ ent.data = 0;
+ nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+ } else {
+ unsigned int data = (unsigned int)pde_data->res_ptr;
+
+ seq_putc(m, data >> 16);
+ seq_putc(m, data >> 8);
+ seq_putc(m, data >> 0);
+ }
return 0;
}
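
For slot resources that hold only a small integer, the three seq_putc() calls above emit the 24-bit value big-endian, matching its byte order in the ROM directory entry. With an assumed example value of 0x00abcdef:

    unsigned int data = 0x00abcdef;     /* assumed example resource value */
    unsigned char out[3] = { data >> 16, data >> 8, data >> 0 };
    /* out[] == { 0xab, 0xcd, 0xef } -- what a read() of the file returns */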
-static void *nubus_proc_start(struct seq_file *m, loff_t *_pos)
+static int nubus_proc_rsrc_open(struct inode *inode, struct file *file)
{
- struct nubus_board *board;
- unsigned pos;
-
- if (*_pos > LONG_MAX)
- return NULL;
- pos = *_pos;
- if (pos == 0)
- return SEQ_START_TOKEN;
- for (board = nubus_boards; board; board = board->next)
- if (--pos == 0)
- break;
- return board;
+ return single_open(file, nubus_proc_rsrc_show, inode);
}
-static void *nubus_proc_next(struct seq_file *p, void *v, loff_t *_pos)
+static const struct file_operations nubus_proc_rsrc_fops = {
+ .open = nubus_proc_rsrc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size)
{
- /* Walk the list of NuBus boards */
- struct nubus_board *board = v;
-
- ++*_pos;
- if (v == SEQ_START_TOKEN)
- board = nubus_boards;
- else if (board)
- board = board->next;
- return board;
+ char name[9];
+ struct nubus_proc_pde_data *pde_data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ if (size)
+ pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ else
+ pde_data = NULL;
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops, pde_data);
}
-static void nubus_proc_stop(struct seq_file *p, void *v)
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
+ char name[9];
+ unsigned char *data = (unsigned char *)ent->data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops,
+ nubus_proc_alloc_pde_data(data, 0));
}
-static const struct seq_operations nubus_proc_seqops = {
- .start = nubus_proc_start,
- .next = nubus_proc_next,
- .stop = nubus_proc_stop,
- .show = nubus_proc_show,
-};
+/*
+ * /proc/nubus stuff
+ */
static int nubus_proc_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &nubus_proc_seqops);
+ return single_open(file, nubus_proc_show, NULL);
}
static const struct file_operations nubus_proc_fops = {
.open = nubus_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
-void __init proc_bus_nubus_add_devices(void)
-{
- struct nubus_dev *dev;
-
- for(dev = nubus_devices; dev; dev = dev->next)
- nubus_proc_attach_device(dev);
-}
-
void __init nubus_proc_init(void)
{
proc_create("nubus", 0, NULL, &nubus_proc_fops);
- if (!MACH_IS_MAC)
- return;
proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
+ if (!proc_bus_nubus_dir)
+ return;
proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
- proc_bus_nubus_add_devices();
}
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a25fd43650ad..441e67e3a9d7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
@@ -6,6 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
+nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
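
The ccflags-y += -I$(src) addition is what makes the tracepoint build work: when core.c defines CREATE_TRACE_POINTS, the trace/define_trace.h machinery re-includes the trace header by file name, so the directory containing trace.h must be on the include path. A trace header living outside include/trace/events/ conventionally ends with a trailer like the following (the trace.h hunk below is truncated before this point, so this shows the standard pattern rather than quoting the patch):

    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_FILE trace

    /* This part must be outside protection */
    #include <trace/define_trace.h>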
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 839650e0926a..e8104871cbbf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -29,6 +29,9 @@
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#include "nvme.h"
#include "fabrics.h"
@@ -65,9 +68,26 @@ static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+/*
+ * nvme_wq - hosts nvme related works that are not reset or delete
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive, error recovery and periodic reconnects. nvme_reset_wq
+ * runs reset works which also flush works hosted on nvme_wq for
+ * serialization purposes. nvme_delete_wq hosts controller deletion
+ * works which flush reset works for serialization.
+ */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
+struct workqueue_struct *nvme_reset_wq;
+EXPORT_SYMBOL_GPL(nvme_reset_wq);
+
+struct workqueue_struct *nvme_delete_wq;
+EXPORT_SYMBOL_GPL(nvme_delete_wq);
+
static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
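
The comment above encodes a basic workqueue rule: a work item must never flush the workqueue it is itself running on, or it would wait for itself to complete. Hosting reset and delete works on their own queues is what makes the flushes legal. A minimal sketch of the pattern, assuming the handler below is queued on nvme_reset_wq:

    /* Sketch: safe only because this work runs on nvme_reset_wq. */
    static void example_reset_work(struct work_struct *work)
    {
            flush_workqueue(nvme_wq);   /* serialize against scan/AEN works */
            /* ... perform the actual controller reset ... */
    }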
@@ -89,13 +109,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->reset_work))
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -104,6 +124,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
flush_work(&ctrl->reset_work);
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_delete_ctrl_work(struct work_struct *work)
{
@@ -122,7 +143,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->delete_work))
+ if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
}
@@ -157,13 +178,20 @@ static blk_status_t nvme_error_status(struct request *req)
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
+ case NVME_SC_LBA_RANGE:
+ return BLK_STS_TARGET;
+ case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
return BLK_STS_NOTSUPP;
case NVME_SC_WRITE_FAULT:
case NVME_SC_READ_ERROR:
case NVME_SC_UNWRITTEN_BLOCK:
case NVME_SC_ACCESS_DENIED:
case NVME_SC_READ_ONLY:
+ case NVME_SC_COMPARE_FAILED:
return BLK_STS_MEDIUM;
case NVME_SC_GUARD_CHECK:
case NVME_SC_APPTAG_CHECK:
@@ -190,8 +218,12 @@ static inline bool nvme_req_needs_retry(struct request *req)
void nvme_complete_rq(struct request *req)
{
- if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
- if (nvme_req_needs_failover(req)) {
+ blk_status_t status = nvme_error_status(req);
+
+ trace_nvme_complete_rq(req);
+
+ if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+ if (nvme_req_needs_failover(req, status)) {
nvme_failover_req(req);
return;
}
@@ -202,8 +234,7 @@ void nvme_complete_rq(struct request *req)
return;
}
}
-
- blk_mq_end_request(req, nvme_error_status(req));
+ blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -232,6 +263,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
old_state = ctrl->state;
switch (new_state) {
+ case NVME_CTRL_ADMIN_ONLY:
+ switch (old_state) {
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
@@ -247,6 +287,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
@@ -266,6 +307,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_RECONNECTING:
changed = true;
@@ -591,6 +633,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
+ if (ns)
+ trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+ else
+ trace_nvme_setup_admin_cmd(cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -1217,16 +1263,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
#ifdef CONFIG_NVME_MULTIPATH
/* should never be called due to GENHD_FL_HIDDEN */
if (WARN_ON_ONCE(ns->head->disk))
- return -ENXIO;
+ goto fail;
#endif
if (!kref_get_unless_zero(&ns->kref))
- return -ENXIO;
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+
return 0;
+
+fail_put_ns:
+ nvme_put_ns(ns);
+fail:
+ return -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- nvme_put_ns(disk->private_data);
+ struct nvme_ns *ns = disk->private_data;
+
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -2052,6 +2109,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+ int count = 0;
+ struct nvme_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD)
+ count++;
+ }
+ mutex_unlock(&subsys->lock);
+
+ return count;
+}
+
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
struct nvme_subsystem *subsys, *found;
@@ -2090,7 +2163,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
- if (!(id->cmic & (1 << 1))) {
+ if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
@@ -2257,7 +2330,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
shutdown_timeout, 60);
if (ctrl->shutdown_timeout != shutdown_timeout)
- dev_warn(ctrl->device,
+ dev_info(ctrl->device,
"Shutdown timeout set to %u seconds\n",
ctrl->shutdown_timeout);
} else
@@ -2341,8 +2414,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
- if (ctrl->state != NVME_CTRL_LIVE)
+ switch (ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
+ break;
+ default:
return -EWOULDBLOCK;
+ }
+
file->private_data = ctrl;
return 0;
}
@@ -2606,6 +2685,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_RECONNECTING]= "reconnecting",
[NVME_CTRL_DELETING] = "deleting",
@@ -3079,6 +3159,8 @@ static void nvme_scan_work(struct work_struct *work)
if (ctrl->state != NVME_CTRL_LIVE)
return;
+ WARN_ON_ONCE(!ctrl->tagset);
+
if (nvme_identify_ctrl(ctrl, &id))
return;
@@ -3099,8 +3181,7 @@ static void nvme_scan_work(struct work_struct *work)
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
- * Do not queue new scan work when a controller is reset during
- * removal.
+ * Only queue new scan work when both the admin and IO queues are alive.
*/
if (ctrl->state == NVME_CTRL_LIVE)
queue_work(nvme_wq, &ctrl->scan_work);
@@ -3477,16 +3558,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
int __init nvme_core_init(void)
{
- int result;
+ int result = -ENOMEM;
nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq)
- return -ENOMEM;
+ goto out;
+
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_reset_wq)
+ goto destroy_wq;
+
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_delete_wq)
+ goto destroy_reset_wq;
result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0)
- goto destroy_wq;
+ goto destroy_delete_wq;
nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) {
@@ -3505,8 +3596,13 @@ destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+destroy_delete_wq:
+ destroy_workqueue(nvme_delete_wq);
+destroy_reset_wq:
+ destroy_workqueue(nvme_reset_wq);
destroy_wq:
destroy_workqueue(nvme_wq);
+out:
return result;
}
@@ -3516,6 +3612,8 @@ void nvme_core_exit(void)
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ destroy_workqueue(nvme_delete_wq);
+ destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
}
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 894c2ccb3891..5dd4ceefed8f 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
*/
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
- if (!ops->create_ctrl)
+ if (!ops->create_ctrl || !ops->module)
return -EINVAL;
down_write(&nvmf_transports_rwsem);
@@ -739,11 +739,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
- if (uuid_parse(p, &hostid)) {
+ ret = uuid_parse(p, &hostid);
+ if (ret) {
pr_err("Invalid hostid %s\n", p);
ret = -EINVAL;
+ kfree(p);
goto out;
}
+ kfree(p);
break;
case NVMF_OPT_DUP_CONNECT:
opts->duplicate_connect = true;
@@ -869,32 +872,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_unlock;
}
+ if (!try_module_get(ops->module)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
ret = nvmf_check_required_opts(opts, ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
ops->allowed_opts | ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ctrl = ops->create_ctrl(dev, opts);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
- goto out_unlock;
+ goto out_module_put;
}
if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
dev_warn(ctrl->device,
"controller returned incorrect NQN: \"%s\".\n",
ctrl->subsys->subnqn);
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
nvme_delete_ctrl_sync(ctrl);
return ERR_PTR(-EINVAL);
}
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
return ctrl;
+out_module_put:
+ module_put(ops->module);
out_unlock:
up_read(&nvmf_transports_rwsem);
out_free_opts:
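
The try_module_get()/module_put() bracket added in this hunk pins the transport module only for the duration of the create_ctrl() callback; once the controller exists it takes its own references. Reduced to its essentials, with names taken from the hunk above:

    if (!try_module_get(ops->module))   /* transport may be unloading */
            return ERR_PTR(-EBUSY);
    ctrl = ops->create_ctrl(dev, opts); /* ops cannot disappear here */
    module_put(ops->module);            /* ctrl now pins what it needs */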
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 9ba614953607..25b19f722f5b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
* fabric implementation of NVMe fabrics.
* @entry: Used by the fabrics library to add the new
* registration entry to its linked-list internal tree.
+ * @module: Transport module reference
* @name: Name of the NVMe fabric driver implementation.
* @required_opts: sysfs command-line options that must be specified
* when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
*/
struct nvmf_transport_ops {
struct list_head entry;
+ struct module *module;
const char *name;
int required_opts;
int allowed_opts;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 794e66e4aa20..99bf51c7e513 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]);
+ /* re-enable the admin_q so anything new can fast fail */
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+
+ /* resume the io queues so that things will fast fail */
+ nvme_start_queues(nctrl);
}
static void
@@ -3380,6 +3386,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
static struct nvmf_transport_ops nvme_fc_transport = {
.name = "fc",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
.allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
.create_ctrl = nvme_fc_create_ctrl,
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ba3d7f3349e5..50ef71ee3d86 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -31,27 +31,10 @@
enum nvme_nvm_admin_opcode {
nvme_nvm_admin_identity = 0xe2,
- nvme_nvm_admin_get_l2p_tbl = 0xea,
nvme_nvm_admin_get_bb_tbl = 0xf2,
nvme_nvm_admin_set_bb_tbl = 0xf1,
};
-struct nvme_nvm_hb_rw {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2;
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 slba;
-};
-
struct nvme_nvm_ph_rw {
__u8 opcode;
__u8 flags;
@@ -80,19 +63,6 @@ struct nvme_nvm_identity {
__u32 rsvd11[5];
};
-struct nvme_nvm_l2ptbl {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __le32 cdw2[4];
- __le64 prp1;
- __le64 prp2;
- __le64 slba;
- __le32 nlb;
- __le16 cdw14[6];
-};
-
struct nvme_nvm_getbbtbl {
__u8 opcode;
__u8 flags;
@@ -139,9 +109,7 @@ struct nvme_nvm_command {
union {
struct nvme_common_command common;
struct nvme_nvm_identity identity;
- struct nvme_nvm_hb_rw hb_rw;
struct nvme_nvm_ph_rw ph_rw;
- struct nvme_nvm_l2ptbl l2p;
struct nvme_nvm_getbbtbl get_bb;
struct nvme_nvm_setbbtbl set_bb;
struct nvme_nvm_erase_blk erase;
@@ -167,7 +135,7 @@ struct nvme_nvm_id_group {
__u8 num_lun;
__u8 num_pln;
__u8 rsvd1;
- __le16 num_blk;
+ __le16 num_chk;
__le16 num_pg;
__le16 fpg_sz;
__le16 csecs;
@@ -234,11 +202,9 @@ struct nvme_nvm_bb_tbl {
static inline void _nvme_nvm_check_size(void)
{
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
@@ -249,51 +215,58 @@ static inline void _nvme_nvm_check_size(void)
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
struct nvme_nvm_id_group *src;
- struct nvm_id_group *dst;
+ struct nvm_id_group *grp;
+ int sec_per_pg, sec_per_pl, pg_per_blk;
if (nvme_nvm_id->cgrps != 1)
return -EINVAL;
src = &nvme_nvm_id->groups[0];
- dst = &nvm_id->grp;
-
- dst->mtype = src->mtype;
- dst->fmtype = src->fmtype;
- dst->num_ch = src->num_ch;
- dst->num_lun = src->num_lun;
- dst->num_pln = src->num_pln;
-
- dst->num_pg = le16_to_cpu(src->num_pg);
- dst->num_blk = le16_to_cpu(src->num_blk);
- dst->fpg_sz = le16_to_cpu(src->fpg_sz);
- dst->csecs = le16_to_cpu(src->csecs);
- dst->sos = le16_to_cpu(src->sos);
-
- dst->trdt = le32_to_cpu(src->trdt);
- dst->trdm = le32_to_cpu(src->trdm);
- dst->tprt = le32_to_cpu(src->tprt);
- dst->tprm = le32_to_cpu(src->tprm);
- dst->tbet = le32_to_cpu(src->tbet);
- dst->tbem = le32_to_cpu(src->tbem);
- dst->mpos = le32_to_cpu(src->mpos);
- dst->mccap = le32_to_cpu(src->mccap);
-
- dst->cpar = le16_to_cpu(src->cpar);
-
- if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
- memcpy(dst->lptbl.id, src->lptbl.id, 8);
- dst->lptbl.mlc.num_pairs =
- le16_to_cpu(src->lptbl.mlc.num_pairs);
-
- if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
- pr_err("nvm: number of MLC pairs not supported\n");
- return -EINVAL;
- }
+ grp = &nvm_id->grp;
+
+ grp->mtype = src->mtype;
+ grp->fmtype = src->fmtype;
+
+ grp->num_ch = src->num_ch;
+ grp->num_lun = src->num_lun;
+
+ grp->num_chk = le16_to_cpu(src->num_chk);
+ grp->csecs = le16_to_cpu(src->csecs);
+ grp->sos = le16_to_cpu(src->sos);
+
+ pg_per_blk = le16_to_cpu(src->num_pg);
+ sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
+ sec_per_pl = sec_per_pg * src->num_pln;
+ grp->clba = sec_per_pl * pg_per_blk;
+ grp->ws_per_chk = pg_per_blk;
- memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
- dst->lptbl.mlc.num_pairs);
+ grp->mpos = le32_to_cpu(src->mpos);
+ grp->cpar = le16_to_cpu(src->cpar);
+ grp->mccap = le32_to_cpu(src->mccap);
+
+ grp->ws_opt = grp->ws_min = sec_per_pg;
+ grp->ws_seq = NVM_IO_SNGL_ACCESS;
+
+ if (grp->mpos & 0x020202) {
+ grp->ws_seq = NVM_IO_DUAL_ACCESS;
+ grp->ws_opt <<= 1;
+ } else if (grp->mpos & 0x040404) {
+ grp->ws_seq = NVM_IO_QUAD_ACCESS;
+ grp->ws_opt <<= 2;
}
+ grp->trdt = le32_to_cpu(src->trdt);
+ grp->trdm = le32_to_cpu(src->trdm);
+ grp->tprt = le32_to_cpu(src->tprt);
+ grp->tprm = le32_to_cpu(src->tprm);
+ grp->tbet = le32_to_cpu(src->tbet);
+ grp->tbem = le32_to_cpu(src->tbem);
+
+ /* 1.2 compatibility */
+ grp->num_pln = src->num_pln;
+ grp->num_pg = le16_to_cpu(src->num_pg);
+ grp->fpg_sz = le16_to_cpu(src->fpg_sz);
+
return 0;
}
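
The chunk geometry above is derived from the 1.2-style page/plane fields. A standalone arithmetic check of the derivation, using assumed example values (fpg_sz = 4096, csecs = 512, num_pln = 4, num_pg = 512):

    #include <stdio.h>

    int main(void)
    {
            int fpg_sz = 4096, csecs = 512, num_pln = 4, pg_per_blk = 512;
            int sec_per_pg = fpg_sz / csecs;        /* 8 sectors per page */
            int sec_per_pl = sec_per_pg * num_pln;  /* 32 across planes */
            int clba = sec_per_pl * pg_per_blk;     /* 16384 blocks/chunk */

            printf("sec_per_pg=%d sec_per_pl=%d clba=%d ws_per_chk=%d\n",
                   sec_per_pg, sec_per_pl, clba, pg_per_blk);
            return 0;
    }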
@@ -332,62 +305,6 @@ out:
return ret;
}
-static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
- u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
- u32 nlb_pr_rq = len / sizeof(u64);
- u64 cmd_slba = slba;
- void *entries;
- int ret = 0;
-
- c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
- c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
- entries = kmalloc(len, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- while (nlb) {
- u32 cmd_nlb = min(nlb_pr_rq, nlb);
- u64 elba = slba + cmd_nlb;
-
- c.l2p.slba = cpu_to_le64(cmd_slba);
- c.l2p.nlb = cpu_to_le32(cmd_nlb);
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
- (struct nvme_command *)&c, entries, len);
- if (ret) {
- dev_err(ns->ctrl->device,
- "L2P table transfer failed (%d)\n", ret);
- ret = -EIO;
- goto out;
- }
-
- if (unlikely(elba > nvmdev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Transform physical address to target address space */
- nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
-
- if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
- ret = -EINTR;
- goto out;
- }
-
- cmd_slba += cmd_nlb;
- nlb -= cmd_nlb;
- }
-
-out:
- kfree(entries);
- return ret;
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
u8 *blks)
{
@@ -397,7 +314,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = geo->blks_per_lun * geo->plane_mode;
+ int nr_blks = geo->nr_chks * geo->plane_mode;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
@@ -438,7 +355,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
+ memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
kfree(bb_tbl);
return ret;
@@ -474,10 +391,6 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-
- if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
- c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
- rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
@@ -597,8 +510,6 @@ static void nvme_nvm_dev_dma_free(void *pool, void *addr,
static struct nvm_dev_ops nvme_nvm_dev_ops = {
.identity = nvme_nvm_identity,
- .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
-
.get_bb_tbl = nvme_nvm_get_bb_tbl,
.set_bb_tbl = nvme_nvm_set_bb_tbl,
@@ -883,7 +794,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
} else if (strcmp(attr->name, "num_planes") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
} else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
} else if (strcmp(attr->name, "num_pages") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
} else if (strcmp(attr->name, "page_size") == 0) {
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1218a9fca846..3b211d9e58b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -33,51 +33,11 @@ void nvme_failover_req(struct request *req)
kblockd_schedule_work(&ns->head->requeue_work);
}
-bool nvme_req_needs_failover(struct request *req)
+bool nvme_req_needs_failover(struct request *req, blk_status_t error)
{
if (!(req->cmd_flags & REQ_NVME_MPATH))
return false;
-
- switch (nvme_req(req)->status & 0x7ff) {
- /*
- * Generic command status:
- */
- case NVME_SC_INVALID_OPCODE:
- case NVME_SC_INVALID_FIELD:
- case NVME_SC_INVALID_NS:
- case NVME_SC_LBA_RANGE:
- case NVME_SC_CAP_EXCEEDED:
- case NVME_SC_RESERVATION_CONFLICT:
- return false;
-
- /*
- * I/O command set specific error. Unfortunately these values are
- * reused for fabrics commands, but those should never get here.
- */
- case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_INVALID_PI:
- case NVME_SC_READ_ONLY:
- case NVME_SC_ONCS_NOT_SUPPORTED:
- WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
- nvme_fabrics_command);
- return false;
-
- /*
- * Media and Data Integrity Errors:
- */
- case NVME_SC_WRITE_FAULT:
- case NVME_SC_READ_ERROR:
- case NVME_SC_GUARD_CHECK:
- case NVME_SC_APPTAG_CHECK:
- case NVME_SC_REFTAG_CHECK:
- case NVME_SC_COMPARE_FAILED:
- case NVME_SC_ACCESS_DENIED:
- case NVME_SC_UNWRITTEN_BLOCK:
- return false;
- }
-
- /* Everything else could be a path failure, so should be retried */
- return true;
+ return blk_path_error(error);
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
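
The open-coded status switch deleted above moves into a generic block-layer helper introduced by the same series. blk_path_error() is roughly the following: it reports false for target-side errors that no alternate path can fix, and true for anything that may indicate a path failure and is therefore worth retrying:

    static inline bool blk_path_error(blk_status_t error)
    {
            switch (error) {
            case BLK_STS_NOTSUPP:
            case BLK_STS_NOSPC:
            case BLK_STS_TARGET:
            case BLK_STS_NEXUS:
            case BLK_STS_MEDIUM:
            case BLK_STS_PROTECTION:
                    return false;
            }
            /* Anything else could be a path failure, so retry it */
            return true;
    }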
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a00eabd06427..8e4550fa08f8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -32,6 +32,8 @@ extern unsigned int admin_timeout;
#define NVME_KATO_GRACE 10
extern struct workqueue_struct *nvme_wq;
+extern struct workqueue_struct *nvme_reset_wq;
+extern struct workqueue_struct *nvme_delete_wq;
enum {
NVME_NS_LBA = 0,
@@ -119,6 +121,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
+ NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
@@ -393,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
@@ -401,7 +405,7 @@ extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
-bool nvme_req_needs_failover(struct request *req);
+bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
@@ -430,7 +434,8 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline void nvme_failover_req(struct request *req)
{
}
-static inline bool nvme_req_needs_failover(struct request *req)
+static inline bool nvme_req_needs_failover(struct request *req,
+ blk_status_t error)
{
return false;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4276ebfff22b..6fe7af00a1f4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -75,7 +75,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct nvme_queue **queues;
+ struct nvme_queue *queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
@@ -365,7 +365,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
WARN_ON(hctx_idx != 0);
WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -387,7 +387,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+ struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
if (!nvmeq->tags)
nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -403,7 +403,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = dev->queues[queue_idx];
+ struct nvme_queue *nvmeq = &dev->queues[queue_idx];
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
@@ -1044,7 +1044,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
@@ -1138,9 +1138,14 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
*/
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
- /* If there is a reset ongoing, we shouldn't reset again. */
- if (dev->ctrl.state == NVME_CTRL_RESETTING)
+ /* If there is a reset/reinit ongoing, we shouldn't reset again. */
+ switch (dev->ctrl.state) {
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
return false;
+ default:
+ break;
+ }
/* We shouldn't reset unless the controller is on fatal error state
* _or_ if we lost the communication with it.
@@ -1280,7 +1285,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
if (nvmeq->sq_cmds)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1288,10 +1292,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
int i;
for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
dev->ctrl.queue_count--;
- dev->queues[i] = NULL;
- nvme_free_queue(nvmeq);
+ nvme_free_queue(&dev->queues[i]);
}
}
@@ -1323,12 +1325,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
- struct nvme_queue *nvmeq = dev->queues[0];
-
- if (!nvmeq)
- return;
- if (nvme_suspend_queue(nvmeq))
- return;
+ struct nvme_queue *nvmeq = &dev->queues[0];
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
@@ -1367,7 +1364,7 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
@@ -1382,13 +1379,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int node)
{
- struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
- node);
- if (!nvmeq)
- return NULL;
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (dev->ctrl.queue_count > qid)
+ return 0;
nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
&nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1407,17 +1404,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_depth = depth;
nvmeq->qid = qid;
nvmeq->cq_vector = -1;
- dev->queues[qid] = nvmeq;
dev->ctrl.queue_count++;
- return nvmeq;
+ return 0;
free_cqdma:
dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
- kfree(nvmeq);
- return NULL;
+ return -ENOMEM;
}
static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1590,14 +1585,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- nvmeq = dev->queues[0];
- if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
- dev_to_node(dev->dev));
- if (!nvmeq)
- return -ENOMEM;
- }
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+ dev_to_node(dev->dev));
+ if (result)
+ return result;
+ nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1627,7 +1620,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
/* vector == qid - 1, match nvme_create_queue */
- if (!nvme_alloc_queue(dev, i, dev->q_depth,
+ if (nvme_alloc_queue(dev, i, dev->q_depth,
pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
ret = -ENOMEM;
break;
@@ -1636,15 +1629,15 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
max = min(dev->max_qid, dev->ctrl.queue_count - 1);
for (i = dev->online_queues; i <= max; i++) {
- ret = nvme_create_queue(dev->queues[i], i);
+ ret = nvme_create_queue(&dev->queues[i], i);
if (ret)
break;
}
/*
* Ignore failing Create SQ/CQ commands, we can continue with less
- * than the desired aount of queues, and even a controller without
- * I/O queues an still be used to issue admin commands. This might
+ * than the desired amount of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
* be useful to upgrade a buggy firmware for example.
*/
return ret >= 0 ? 0 : ret;
@@ -1661,30 +1654,40 @@ static ssize_t nvme_cmb_show(struct device *dev,
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
- u64 szu, size, offset;
+ u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
+
+ return 1ULL << (12 + 4 * szu);
+}
+
+static u32 nvme_cmb_size(struct nvme_dev *dev)
+{
+ return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
+}
+
+static void nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 size, offset;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- void __iomem *cmb;
int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
- if (!(NVME_CMB_SZ(dev->cmbsz)))
- return NULL;
+ if (!dev->cmbsz)
+ return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
if (!use_cmb_sqes)
- return NULL;
+ return;
- szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
- size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
+ offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
- return NULL;
+ return;
/*
* Controllers may support a CMB size larger than their BAR,
@@ -1694,13 +1697,16 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!cmb)
- return NULL;
-
+ dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
+ if (!dev->cmb)
+ return;
dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
- return cmb;
+
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->ctrl.device,
+ "failed to add sysfs attribute for CMB\n");
}
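
The CMBSZ decoding above follows the NVMe register layout: the size unit grows by a factor of 16 per SZU step (4 KiB, 64 KiB, 1 MiB, 16 MiB, ...), and the total CMB size is that unit multiplied by the SZ field. A worked example with assumed register values:

    /* Assumed example: SZU = 2, SZ = 64. */
    u64 unit = 1ULL << (12 + 4 * 2);    /* 1 MiB size unit */
    u64 size = unit * 64;               /* 64 MiB controller memory buffer */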
static inline void nvme_release_cmb(struct nvme_dev *dev)
@@ -1768,7 +1774,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
dma_addr_t descs_dma;
int i = 0;
void **bufs;
- u64 size = 0, tmp;
+ u64 size, tmp;
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
@@ -1851,7 +1857,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
u64 preferred = (u64)dev->ctrl.hmpre * 4096;
u64 min = (u64)dev->ctrl.hmmin * 4096;
u32 enable_bits = NVME_HOST_MEM_ENABLE;
- int ret = 0;
+ int ret;
preferred = min(preferred, max);
if (min > max) {
@@ -1892,7 +1898,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = dev->queues[0];
+ struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, nr_io_queues;
unsigned long size;
@@ -1905,7 +1911,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+ if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
@@ -2005,9 +2011,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -2018,7 +2024,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
retry:
timeout = ADMIN_TIMEOUT;
for (; i > 0; i--, sent++)
- if (nvme_delete_queue(dev->queues[i], opcode))
+ if (nvme_delete_queue(&dev->queues[i], opcode))
break;
while (sent--) {
@@ -2033,13 +2039,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
}
/*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * Return an error only when the IO queues tagset allocation fails.
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ int ret;
+
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2055,8 +2060,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
- if (blk_mq_alloc_tag_set(&dev->tagset))
- return 0;
+ ret = blk_mq_alloc_tag_set(&dev->tagset);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return ret;
+ }
dev->ctrl.tagset = &dev->tagset;
nvme_dbbuf_set(dev);
@@ -2122,22 +2131,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
"set queue depth=%u\n", dev->q_depth);
}
- /*
- * CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
- * has no name we can pass NULL as final argument to
- * sysfs_add_file_to_group.
- */
-
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
- dev->cmb = nvme_map_cmb(dev);
- if (dev->cmb) {
- if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL))
- dev_warn(dev->ctrl.device,
- "failed to add sysfs attribute for CMB\n");
- }
- }
+ nvme_map_cmb(dev);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -2170,7 +2164,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i, queues;
+ int i;
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -2205,21 +2199,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_stop_queues(&dev->ctrl);
- queues = dev->online_queues - 1;
- for (i = dev->ctrl.queue_count - 1; i > 0; i--)
- nvme_suspend_queue(dev->queues[i]);
-
- if (dead) {
- /* A device might become IO incapable very soon during
- * probe, before the admin queue is configured. Thus,
- * queue_count can be 0 here.
- */
- if (dev->ctrl.queue_count)
- nvme_suspend_queue(dev->queues[0]);
- } else {
- nvme_disable_io_queues(dev, queues);
+ if (!dead) {
+ nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
+ for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
+ nvme_suspend_queue(&dev->queues[i]);
+
nvme_pci_disable(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2289,6 +2275,7 @@ static void nvme_reset_work(struct work_struct *work)
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
+ enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
goto out;
@@ -2300,6 +2287,16 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ /*
+ * Reuse the RECONNECTING state, introduced by the nvme-fc/rdma
+ * transports, to mark the initializing procedure here.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller RECONNECTING\n");
+ goto out;
+ }
+
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -2352,15 +2349,23 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
+ new_state = NVME_CTRL_ADMIN_ONLY;
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ /* hit this only when tagset allocation fails */
+ if (nvme_dev_add(dev))
+ new_state = NVME_CTRL_ADMIN_ONLY;
nvme_unfreeze(&dev->ctrl);
}
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
- dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+ /*
+ * If only the admin queue is live, keep it for further investigation
+ * or recovery.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller state %d\n", new_state);
goto out;
}
@@ -2468,8 +2473,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
- GFP_KERNEL, node);
+
+ dev->queues = kcalloc_node(num_possible_cpus() + 1,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -2496,10 +2502,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
- nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
- queue_work(nvme_wq, &dev->ctrl.reset_work);
+ nvme_reset_ctrl(&dev->ctrl);
+
return 0;
release_pools:
@@ -2523,7 +2529,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&dev->ctrl);
+ nvme_reset_ctrl_sync(&dev->ctrl);
}
static void nvme_shutdown(struct pci_dev *pdev)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2a0bba7f50cf..2bc059f7d73c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -66,7 +66,6 @@ struct nvme_rdma_request {
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
int nents;
- bool inline_data;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
@@ -1092,7 +1091,6 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
- req->inline_data = true;
req->num_sge++;
return 0;
}
@@ -1164,7 +1162,6 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
int count, ret;
req->num_sge = 1;
- req->inline_data = false;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -2018,6 +2015,7 @@ out_free_ctrl:
static struct nvmf_transport_ops nvme_rdma_transport = {
.name = "rdma",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR,
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
@@ -2040,7 +2038,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
static struct ib_client nvme_rdma_ib_client = {
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
new file mode 100644
index 000000000000..41944bbef835
--- /dev/null
+++ b/drivers/nvme/host/trace.c
@@ -0,0 +1,130 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 sqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 sq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 cqid = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
+ sqid, qsize, sq_flags, cqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 cqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 cq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 irq_vector = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
+ cqid, qsize, cq_flags, irq_vector);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_create_sq:
+ return nvme_trace_create_sq(p, cdw10);
+ case nvme_admin_create_cq:
+ return nvme_trace_create_cq(p, cdw10);
+ case nvme_admin_identify:
+ return nvme_trace_admin_identify(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ return nvme_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvme_trace_dsm(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
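These helpers treat cdw10 as the start of the 24-byte cdw10..cdw15 region of
the submission queue entry and pull fields out with unaligned little-endian
loads. A minimal sketch of the same decoding outside the trace path (the
helper name and its callers are hypothetical, not part of this patch):

	#include <asm/unaligned.h>

	/* Decode an NVMe Read/Write the way nvme_trace_read_write() does.
	 * cdw10 points at the 24 bytes covering cdw10..cdw15 of the SQE.
	 */
	static void decode_rw(const u8 *cdw10, u64 *slba, u16 *nlb)
	{
		*slba = get_unaligned_le64(cdw10);	/* cdw10/11: start LBA */
		*nlb = get_unaligned_le16(cdw10 + 8);	/* cdw12[15:0]: 0's based len */
	}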
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
new file mode 100644
index 000000000000..ea91fccd1bc0
--- /dev/null
+++ b/drivers/nvme/host/trace.h
@@ -0,0 +1,165 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvme
+
+#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVME_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvme.h"
+
+#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
+#define show_admin_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_admin_opcode_name(nvme_admin_delete_sq), \
+ nvme_admin_opcode_name(nvme_admin_create_sq), \
+ nvme_admin_opcode_name(nvme_admin_get_log_page), \
+ nvme_admin_opcode_name(nvme_admin_delete_cq), \
+ nvme_admin_opcode_name(nvme_admin_create_cq), \
+ nvme_admin_opcode_name(nvme_admin_identify), \
+ nvme_admin_opcode_name(nvme_admin_abort_cmd), \
+ nvme_admin_opcode_name(nvme_admin_set_features), \
+ nvme_admin_opcode_name(nvme_admin_get_features), \
+ nvme_admin_opcode_name(nvme_admin_async_event), \
+ nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_activate_fw), \
+ nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_ns_attach), \
+ nvme_admin_opcode_name(nvme_admin_keep_alive), \
+ nvme_admin_opcode_name(nvme_admin_directive_send), \
+ nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_dbbuf), \
+ nvme_admin_opcode_name(nvme_admin_format_nvm), \
+ nvme_admin_opcode_name(nvme_admin_security_send), \
+ nvme_admin_opcode_name(nvme_admin_security_recv), \
+ nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_admin_cmd(opcode, cdw10) \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10)
+
+#define nvme_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_opcode_name(nvme_cmd_flush), \
+ nvme_opcode_name(nvme_cmd_write), \
+ nvme_opcode_name(nvme_cmd_read), \
+ nvme_opcode_name(nvme_cmd_write_uncor), \
+ nvme_opcode_name(nvme_cmd_compare), \
+ nvme_opcode_name(nvme_cmd_write_zeroes), \
+ nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_resv_register), \
+ nvme_opcode_name(nvme_cmd_resv_report), \
+ nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_resv_release))
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_cmd(opcode, cdw10) \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
+
+TRACE_EVENT(nvme_setup_admin_cmd,
+ TP_PROTO(struct nvme_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->cid, __entry->flags, __entry->metadata,
+ show_admin_opcode_name(__entry->opcode),
+ __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_setup_nvm_cmd,
+ TP_PROTO(int qid, struct nvme_command *cmd),
+ TP_ARGS(qid, cmd),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->qid = qid;
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->qid, __entry->nsid, __entry->cid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->opcode),
+ __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_complete_rq,
+ TP_PROTO(struct request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->qid = req->q->id;
+ __entry->cid = req->tag;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ ),
+ TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->cid, __entry->qid, __entry->result,
+ __entry->retries, __entry->flags, __entry->status)
+
+);
+
+#endif /* _TRACE_NVME_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
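The header follows the standard define_trace.h pattern: every user just
includes "trace.h", while exactly one compilation unit in the driver defines
CREATE_TRACE_POINTS first so the tracepoint bodies are emitted. A hedged
sketch of the wiring (the actual call sites live in other files of this
series and are not shown in this hunk):

	/* In exactly one .c file of the driver: */
	#define CREATE_TRACE_POINTS
	#include "trace.h"

	/* At command setup time, hypothetical call sites: */
	trace_nvme_setup_admin_cmd(cmd);
	trace_nvme_setup_nvm_cmd(req->q->id, cmd);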
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 03e4ab65fe77..5f4f8b16685f 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA
tristate "NVMe over Fabrics RDMA target support"
depends on INFINIBAND
depends on NVME_TARGET
+ select SGL_ALLOC
help
This enables the NVMe RDMA target support, which allows exporting NVMe
devices over RDMA.
@@ -39,6 +40,7 @@ config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
depends on HAS_DMA
+ select SGL_ALLOC
help
This enables the NVMe FC target support, which allows exporting NVMe
devices over FC.
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b54748ad5f48..0bd737117a80 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg_cnt = 0;
req->transfer_len = 0;
req->rsp->status = 0;
+ req->ns = NULL;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
percpu_ref_put(&req->sq->ref);
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
@@ -830,7 +833,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
/* Don't accept keep-alive timeout for discovery controllers */
if (kato) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_free_sqs;
+ goto out_remove_ida;
}
/*
@@ -860,6 +863,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
+out_remove_ida:
+ ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_cqs:
@@ -877,21 +882,22 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
struct nvmet_subsys *subsys = ctrl->subsys;
- nvmet_stop_keep_alive_timer(ctrl);
-
mutex_lock(&subsys->lock);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ nvmet_stop_keep_alive_timer(ctrl);
+
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
- nvmet_subsys_put(subsys);
kfree(ctrl->sqs);
kfree(ctrl->cqs);
kfree(ctrl);
+
+ nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
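The out_remove_ida fix applies the usual kernel unwind idiom: error labels
are ordered in reverse of acquisition, so jumping to a label releases exactly
what was acquired before the failure point. A generic sketch of the idiom
(resource names hypothetical):

	static int setup(void)
	{
		int ret;

		ret = acquire_a();		/* hypothetical */
		if (ret)
			return ret;
		ret = acquire_b();		/* hypothetical */
		if (ret)
			goto out_release_a;
		ret = acquire_c();		/* hypothetical */
		if (ret)
			goto out_release_b;
		return 0;

	out_release_b:
		release_b();
	out_release_a:
		release_a();
		return ret;
	}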
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index db3bf6b8bf9e..19e9e42ae943 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out_ctrl_put;
}
- pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
out:
kfree(d);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 5fd86039e353..9b39a6cb1935 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1697,31 +1697,12 @@ static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
struct scatterlist *sg;
- struct page *page;
unsigned int nent;
- u32 page_len, length;
- int i = 0;
- length = fod->req.transfer_len;
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
if (!sg)
goto out;
- sg_init_table(sg, nent);
-
- while (length) {
- page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
-
fod->data_sg = sg;
fod->data_sg_cnt = nent;
fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
@@ -1731,14 +1712,6 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
return 0;
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- fod->data_sg = NULL;
- fod->data_sg_cnt = 0;
out:
return NVME_SC_INTERNAL;
}
@@ -1746,18 +1719,13 @@ out:
static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
- struct scatterlist *sg;
- int count;
-
if (!fod->data_sg || !fod->data_sg_cnt)
return;
fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
- for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
- __free_page(sg_page(sg));
- kfree(fod->data_sg);
+ sgl_free(fod->data_sg);
fod->data_sg = NULL;
fod->data_sg_cnt = 0;
}
@@ -2522,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
- /* a FC port can only be 1 nvmet port id */
- if (!tgtport->port) {
- tgtport->port = port;
- port->priv = tgtport;
- nvmet_fc_tgtport_get(tgtport);
- ret = 0;
- } else
- ret = -EALREADY;
+ tgtport->port = port;
+ ret = 0;
break;
}
}
@@ -2540,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
- struct nvmet_fc_tgtport *tgtport = port->priv;
- unsigned long flags;
- bool matched = false;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- if (tgtport->port == port) {
- matched = true;
- tgtport->port = NULL;
- }
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
- if (matched)
- nvmet_fc_tgtport_put(tgtport);
+ /* nothing to do */
}
static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
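Both the FC target above and the RDMA target below drop their open-coded
page-by-page scatterlist builders in favour of the lib/scatterlist helpers
pulled in by the new SGL_ALLOC selects. The pairing reduces to the sketch
below (length and error handling shown only in outline):

	#include <linux/scatterlist.h>

	unsigned int nent;
	struct scatterlist *sg;

	/* Allocate an order-0-page-backed sgl covering 'length' bytes. */
	sg = sgl_alloc(length, GFP_KERNEL, &nent);
	if (!sg)
		return -ENOMEM;
	/* ... dma-map and use sg[0..nent-1] ... */
	sgl_free(sg);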
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 6a018a0bd6ce..34712def81b1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -204,6 +204,10 @@ struct fcloop_lport {
struct completion unreg_done;
};
+struct fcloop_lport_priv {
+ struct fcloop_lport *lport;
+};
+
struct fcloop_rport {
struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
int status;
};
+enum {
+ INI_IO_START = 0,
+ INI_IO_ACTIVE = 1,
+ INI_IO_ABORTED = 2,
+ INI_IO_COMPLETED = 3,
+};
+
struct fcloop_fcpreq {
struct fcloop_tport *tport;
struct nvmefc_fcp_req *fcpreq;
spinlock_t reqlock;
u16 status;
+ u32 inistate;
bool active;
bool aborted;
- struct work_struct work;
+ struct kref ref;
+ struct work_struct fcp_rcv_work;
+ struct work_struct abort_rcv_work;
+ struct work_struct tio_done_work;
struct nvmefc_tgt_fcp_req tgt_fcp_req;
};
struct fcloop_ini_fcpreq {
struct nvmefc_fcp_req *fcpreq;
struct fcloop_fcpreq *tfcp_req;
- struct work_struct iniwork;
+ spinlock_t inilock;
};
static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
return 0;
}
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
{
- struct fcloop_ini_fcpreq *inireq =
- container_of(work, struct fcloop_ini_fcpreq, iniwork);
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(ref, struct fcloop_fcpreq, ref);
+
+ kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+ kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+ return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+ struct fcloop_fcpreq *tfcp_req, int status)
+{
+ struct fcloop_ini_fcpreq *inireq = NULL;
+
+ if (fcpreq) {
+ inireq = fcpreq->private;
+ spin_lock(&inireq->inilock);
+ inireq->tfcp_req = NULL;
+ spin_unlock(&inireq->inilock);
+
+ fcpreq->status = status;
+ fcpreq->done(fcpreq);
+ }
+
+ /* release original io reference on tgt struct */
+ fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ int ret = 0;
+ bool aborted = false;
+
+ spin_lock(&tfcp_req->reqlock);
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ tfcp_req->inistate = INI_IO_ACTIVE;
+ break;
+ case INI_IO_ABORTED:
+ aborted = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(aborted))
+ ret = -ECANCELED;
+ else
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+ if (ret)
+ fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+ return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ struct nvmefc_fcp_req *fcpreq;
+ bool completed = false;
+
+ spin_lock(&tfcp_req->reqlock);
+ fcpreq = tfcp_req->fcpreq;
+ switch (tfcp_req->inistate) {
+ case INI_IO_ABORTED:
+ break;
+ case INI_IO_COMPLETED:
+ completed = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(completed)) {
+ /* remove reference taken in original abort downcall */
+ fcloop_tfcp_req_put(tfcp_req);
+ return;
+ }
- inireq->fcpreq->done(inireq->fcpreq);
+ if (tfcp_req->tport->targetport)
+ nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req);
+
+ spin_lock(&tfcp_req->reqlock);
+ tfcp_req->fcpreq = NULL;
+ spin_unlock(&tfcp_req->reqlock);
+
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ /* call_host_done releases reference for abort downcall */
}
/*
@@ -364,20 +484,15 @@ static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
struct fcloop_fcpreq *tfcp_req =
- container_of(work, struct fcloop_fcpreq, work);
- struct fcloop_tport *tport = tfcp_req->tport;
+ container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
spin_lock(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
+ tfcp_req->inistate = INI_IO_COMPLETED;
spin_unlock(&tfcp_req->reqlock);
- if (tport->remoteport && fcpreq) {
- fcpreq->status = tfcp_req->status;
- fcpreq->done(fcpreq);
- }
-
- kfree(tfcp_req);
+ fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
- int ret = 0;
if (!rport->targetport)
return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
inireq->fcpreq = fcpreq;
inireq->tfcp_req = tfcp_req;
- INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+ spin_lock_init(&inireq->inilock);
+
tfcp_req->fcpreq = fcpreq;
tfcp_req->tport = rport->targetport->private;
+ tfcp_req->inistate = INI_IO_START;
spin_lock_init(&tfcp_req->reqlock);
- INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+ INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+ INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+ INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+ kref_init(&tfcp_req->ref);
- ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
- fcpreq->cmdaddr, fcpreq->cmdlen);
+ schedule_work(&tfcp_req->fcp_rcv_work);
- return ret;
+ return 0;
}
static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->work);
+ schedule_work(&tfcp_req->tio_done_work);
}
static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
void *hw_queue_handle,
struct nvmefc_fcp_req *fcpreq)
{
- struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
- struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+ struct fcloop_fcpreq *tfcp_req;
+ bool abortio = true;
+
+ spin_lock(&inireq->inilock);
+ tfcp_req = inireq->tfcp_req;
+ if (tfcp_req)
+ fcloop_tfcp_req_get(tfcp_req);
+ spin_unlock(&inireq->inilock);
if (!tfcp_req)
/* abort has already been called */
return;
- if (rport->targetport)
- nvmet_fc_rcv_fcp_abort(rport->targetport,
- &tfcp_req->tgt_fcp_req);
-
/* break initiator/target relationship for io */
spin_lock(&tfcp_req->reqlock);
- inireq->tfcp_req = NULL;
- tfcp_req->fcpreq = NULL;
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ case INI_IO_ACTIVE:
+ tfcp_req->inistate = INI_IO_ABORTED;
+ break;
+ case INI_IO_COMPLETED:
+ abortio = false;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
spin_unlock(&tfcp_req->reqlock);
- /* post the aborted io completion */
- fcpreq->status = -ECANCELED;
- schedule_work(&inireq->iniwork);
+ if (abortio)
+ /* leave the reference while the work item is scheduled */
+ WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ else {
+ /*
+ * The io has already had its done callback invoked, so there is
+ * nothing more to do; release the reference taken above.
+ */
+ fcloop_tfcp_req_put(tfcp_req);
+ }
}
static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
- struct fcloop_lport *lport = localport->private;
+ struct fcloop_lport_priv *lport_priv = localport->private;
+ struct fcloop_lport *lport = lport_priv->lport;
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* sizes of additional private data for data structures */
- .local_priv_sz = sizeof(struct fcloop_lport),
+ .local_priv_sz = sizeof(struct fcloop_lport_priv),
.remote_priv_sz = sizeof(struct fcloop_rport),
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* optional features */
- .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+ .target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
};
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
struct fcloop_ctrl_options *opts;
struct nvme_fc_local_port *localport;
struct fcloop_lport *lport;
- int ret;
+ struct fcloop_lport_priv *lport_priv;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+ if (!lport)
+ return -ENOMEM;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
- return -ENOMEM;
+ goto out_free_lport;
ret = fcloop_parse_options(opts, buf);
if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
if (!ret) {
- unsigned long flags;
-
/* success */
- lport = localport->private;
+ lport_priv = localport->private;
+ lport_priv->lport = lport;
+
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
spin_unlock_irqrestore(&fcloop_lock, flags);
-
- /* mark all of the input buffer consumed */
- ret = count;
}
out_free_opts:
kfree(opts);
+out_free_lport:
+ /* free only if we're going to fail */
+ if (ret)
+ kfree(lport);
+
return ret ? ret : count;
}
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
wait_for_completion(&lport->unreg_done);
+ kfree(lport);
+
return ret;
}
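The fcloop rework replaces the old active/aborted flags with a per-I/O kref
plus an explicit initiator-side state machine. Summarising the switch
statements above (a reading aid, not new driver code), all transitions
happen under tfcp_req->reqlock:

	/*
	 * INI_IO_START  --fcp_rcv_work------->  INI_IO_ACTIVE
	 * INI_IO_START  --fcloop_fcp_abort--->  INI_IO_ABORTED
	 * INI_IO_ACTIVE --fcloop_fcp_abort--->  INI_IO_ABORTED
	 * INI_IO_ACTIVE --tio_done_work------>  INI_IO_COMPLETED
	 *
	 * Any other transition trips the WARN_ON(1) in the handlers.
	 */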
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 1e21b286f299..7991ec3a17db 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
static struct nvmf_transport_ops nvme_loop_transport = {
.name = "loop",
+ .module = THIS_MODULE,
.create_ctrl = nvme_loop_create_ctrl,
};
@@ -716,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void)
nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
module_init(nvme_loop_init_module);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 49912909c298..978e169c11bf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
-static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
-{
- struct scatterlist *sg;
- int count;
-
- if (!sgl || !nents)
- return;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
- kfree(sgl);
-}
-
-static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
- u32 length)
-{
- struct scatterlist *sg;
- struct page *page;
- unsigned int nent;
- int i = 0;
-
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- goto out;
-
- sg_init_table(sg, nent);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
-out:
- return NVME_SC_INTERNAL;
-}
-
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != &rsp->cmd->inline_sg)
- nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+ sgl_free(rsp->req.sg);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
- u16 status;
/* no data command? */
if (!len)
return 0;
- status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
- len);
- if (status)
- return status;
+ rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+ if (!rsp->req.sg)
+ return NVME_SC_INTERNAL;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -976,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
- pr_info("freeing queue %d\n", queue->idx);
+ pr_debug("freeing queue %d\n", queue->idx);
nvmet_sq_destroy(&queue->nvme_sq);
@@ -1558,25 +1503,9 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- struct nvmet_rdma_queue *queue;
-
nvmet_unregister_transport(&nvmet_rdma_ops);
-
- flush_scheduled_work();
-
- mutex_lock(&nvmet_rdma_queue_mutex);
- while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
- struct nvmet_rdma_queue, queue_list))) {
- list_del_init(&queue->queue_list);
-
- mutex_unlock(&nvmet_rdma_queue_mutex);
- __nvmet_rdma_queue_disconnect(queue);
- mutex_lock(&nvmet_rdma_queue_mutex);
- }
- mutex_unlock(&nvmet_rdma_queue_mutex);
-
- flush_scheduled_work();
ib_unregister_client(&nvmet_rdma_ib_client);
+ WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
ida_destroy(&nvmet_rdma_queue_ida);
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 26618ba8f92a..a9d6fe86585b 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -316,6 +316,32 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
EXPORT_SYMBOL(of_get_cpu_node);
/**
+ * of_cpu_node_to_id: Get the logical CPU number for a given device_node
+ *
+ * @cpu_node: Pointer to the device_node for CPU.
+ *
+ * Returns the logical CPU number of the given CPU device_node.
+ * Returns -ENODEV if the CPU is not found.
+ */
+int of_cpu_node_to_id(struct device_node *cpu_node)
+{
+ int cpu;
+ bool found = false;
+ struct device_node *np;
+
+ for_each_possible_cpu(cpu) {
+ np = of_cpu_device_node_get(cpu);
+ found = (cpu_node == np);
+ of_node_put(np);
+ if (found)
+ return cpu;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(of_cpu_node_to_id);
+
+/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
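A hedged sketch of a consumer of of_cpu_node_to_id(): walking a phandle list
of CPU nodes and mapping each to its logical CPU number (the "cpus" property
and the mask are hypothetical; the DSU PMU added later in this series does
something very similar):

	struct device_node *cpu_np;
	int cpu, i = 0;

	while ((cpu_np = of_parse_phandle(np, "cpus", i++))) {
		cpu = of_cpu_node_to_id(cpu_np);
		of_node_put(cpu_np);
		if (cpu < 0)
			return -ENODEV;	/* not a possible CPU */
		cpumask_set_cpu(cpu, mask);
	}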
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 8ad33a44a7b8..f25d36358187 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -981,10 +981,18 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return (void *)of_device_get_match_data(dev);
+}
+
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
.device_is_available = of_fwnode_device_is_available,
+ .device_get_match_data = of_fwnode_device_get_match_data,
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
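With this op in place the generic device_get_match_data() accessor works for
OF-backed devices too, so drivers need not call of_device_get_match_data()
directly. A hypothetical consumer:

	const struct my_chip_data *data;	/* per-compatible data, hypothetical */

	data = device_get_match_data(dev);
	if (!data)
		return -ENODEV;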
diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile
index e70ceb406fe9..6ce6aefacc81 100644
--- a/drivers/opp/Makefile
+++ b/drivers/opp/Makefile
@@ -2,3 +2,4 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-opp-supply.o
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
new file mode 100644
index 000000000000..370eff3acd8a
--- /dev/null
+++ b/drivers/opp/ti-opp-supply.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon <nm@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * TI OPP supply driver that provides override into the regulator control
+ * for generic opp core to handle devices with ABB regulator and/or
+ * SmartReflex Class0.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * struct ti_opp_supply_optimum_voltage_table - optimized voltage table
+ * @reference_uv: reference voltage (usually Nominal voltage)
+ * @optimized_uv: Optimized voltage from efuse
+ */
+struct ti_opp_supply_optimum_voltage_table {
+ unsigned int reference_uv;
+ unsigned int optimized_uv;
+};
+
+/**
+ * struct ti_opp_supply_data - OMAP specific opp supply data
+ * @vdd_table: Optimized voltage mapping table
+ * @num_vdd_table: number of entries in vdd_table
+ * @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply
+ */
+struct ti_opp_supply_data {
+ struct ti_opp_supply_optimum_voltage_table *vdd_table;
+ u32 num_vdd_table;
+ u32 vdd_absolute_max_voltage_uv;
+};
+
+static struct ti_opp_supply_data opp_data;
+
+/**
+ * struct ti_opp_supply_of_data - device tree match data
+ * @flags: specific type of opp supply
+ * @efuse_voltage_mask: mask required for efuse register representing voltage
+ * @efuse_voltage_uv: Are the efuse entries in micro-volts? If not, assume
+ * milli-volts.
+ */
+struct ti_opp_supply_of_data {
+#define OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE BIT(1)
+#define OPPDM_HAS_NO_ABB BIT(2)
+ const u8 flags;
+ const u32 efuse_voltage_mask;
+ const bool efuse_voltage_uv;
+};
+
+/**
+ * _store_optimized_voltages() - store optimized voltages
+ * @dev: ti opp supply device for which we need to store info
+ * @data: data specific to the device
+ *
+ * Picks up efuse based optimized voltages for VDD unique per device and
+ * stores it in internal data structure for use during transition requests.
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int _store_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ void __iomem *base;
+ struct property *prop;
+ struct resource *res;
+ const __be32 *val;
+ int proplen, i;
+ int ret = 0;
+ struct ti_opp_supply_optimum_voltage_table *table;
+ const struct ti_opp_supply_of_data *of_data = dev_get_drvdata(dev);
+
+ /* pick up Efuse based voltages */
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Unable to get IO resource\n");
+ ret = -ENODEV;
+ goto out_map;
+ }
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "Unable to map Efuse registers\n");
+ ret = -ENOMEM;
+ goto out_map;
+ }
+
+ /* Fetch efuse-settings. */
+ prop = of_find_property(dev->of_node, "ti,efuse-settings", NULL);
+ if (!prop) {
+ dev_err(dev, "No 'ti,efuse-settings' property found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ proplen = prop->length / sizeof(int);
+ data->num_vdd_table = proplen / 2;
+ /* Verify for corrupted OPP entries in dt */
+ if (data->num_vdd_table * 2 * sizeof(int) != prop->length) {
+ dev_err(dev, "Invalid 'ti,efuse-settings'\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "ti,absolute-max-voltage-uv",
+ &data->vdd_absolute_max_voltage_uv);
+ if (ret) {
+ dev_err(dev, "ti,absolute-max-voltage-uv is missing\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ table = kcalloc(data->num_vdd_table, sizeof(*data->vdd_table),
+ GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ data->vdd_table = table;
+
+ val = prop->value;
+ for (i = 0; i < data->num_vdd_table; i++, table++) {
+ u32 efuse_offset;
+ u32 tmp;
+
+ table->reference_uv = be32_to_cpup(val++);
+ efuse_offset = be32_to_cpup(val++);
+
+ tmp = readl(base + efuse_offset);
+ tmp &= of_data->efuse_voltage_mask;
+ tmp >>= __ffs(of_data->efuse_voltage_mask);
+
+ table->optimized_uv = of_data->efuse_voltage_uv ? tmp :
+ tmp * 1000;
+
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d vset=%d\n",
+ i, efuse_offset, table->reference_uv,
+ table->optimized_uv);
+
+ /*
+ * Some older samples might not have optimized efuse
+ * Use reference voltage for those - just add debug message
+ * for them.
+ */
+ if (!table->optimized_uv) {
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d:vset0\n",
+ i, efuse_offset, table->reference_uv);
+ table->optimized_uv = table->reference_uv;
+ }
+ }
+out:
+ iounmap(base);
+out_map:
+ return ret;
+}
+
+/**
+ * _free_optimized_voltages() - free resources for optvoltages
+ * @dev: device for which we need to free info
+ * @data: data specific to the device
+ */
+static void _free_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ kfree(data->vdd_table);
+ data->vdd_table = NULL;
+ data->num_vdd_table = 0;
+}
+
+/**
+ * _get_optimal_vdd_voltage() - Finds optimal voltage for the supply
+ * @dev: device for which we need to find info
+ * @data: data specific to the device
+ * @reference_uv: reference voltage (OPP voltage) for which we need value
+ *
+ * Return: if a match is found, return optimized voltage, else return
+ * reference_uv, also return reference_uv if no optimization is needed.
+ */
+static int _get_optimal_vdd_voltage(struct device *dev,
+ struct ti_opp_supply_data *data,
+ int reference_uv)
+{
+ int i;
+ struct ti_opp_supply_optimum_voltage_table *table;
+
+ if (!data->num_vdd_table)
+ return reference_uv;
+
+ table = data->vdd_table;
+ if (!table)
+ return -EINVAL;
+
+ /* Find an exact match - this list is usually very small */
+ for (i = 0; i < data->num_vdd_table; i++, table++)
+ if (table->reference_uv == reference_uv)
+ return table->optimized_uv;
+
+ /* If things are screwed up, we'd make a mess on the console; ratelimit */
+ dev_err_ratelimited(dev, "%s: Failed optimized voltage match for %d\n",
+ __func__, reference_uv);
+ return reference_uv;
+}
+
+static int _opp_set_voltage(struct device *dev,
+ struct dev_pm_opp_supply *supply,
+ int new_target_uv, struct regulator *reg,
+ char *reg_name)
+{
+ int ret;
+ unsigned long vdd_uv, uv_max;
+
+ if (new_target_uv)
+ vdd_uv = new_target_uv;
+ else
+ vdd_uv = supply->u_volt;
+
+ /*
+ * If we do have an absolute max voltage specified, then we should
+ * use that voltage instead to allow for cases where the voltage rails
+ * are ganged (example: if we set the max for an OPP as 1.12 V and
+ * the absolute max is 1.5 V, another rail cannot get 1.25 V if the
+ * regulator is constrained to a max of 1.12 V, even though it can
+ * function at 1.25 V).
+ */
+ if (opp_data.vdd_absolute_max_voltage_uv)
+ uv_max = opp_data.vdd_absolute_max_voltage_uv;
+ else
+ uv_max = supply->u_volt_max;
+
+ if (vdd_uv > uv_max ||
+ vdd_uv < supply->u_volt_min ||
+ supply->u_volt_min > uv_max) {
+ dev_warn(dev,
+ "Invalid range voltages [Min:%lu target:%lu Max:%lu]\n",
+ supply->u_volt_min, vdd_uv, uv_max);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s scaling to %luuV[min %luuV max %luuV]\n", reg_name,
+ vdd_uv, supply->u_volt_min,
+ uv_max);
+
+ ret = regulator_set_voltage_triplet(reg,
+ supply->u_volt_min,
+ vdd_uv,
+ uv_max);
+ if (ret) {
+ dev_err(dev, "%s failed for %luuV[min %luuV max %luuV]\n",
+ reg_name, vdd_uv, supply->u_volt_min,
+ uv_max);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ti_opp_supply_set_opp() - do the opp supply transition
+ * @data: information on regulators and new and old opps provided by
+ * opp core to use in transition
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
+ struct device *dev = data->dev;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct clk *clk = data->clk;
+ struct regulator *vdd_reg = data->regulators[0];
+ struct regulator *vbb_reg = data->regulators[1];
+ int vdd_uv;
+ int ret;
+
+ vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
+ new_supply_vbb->u_volt);
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_voltage;
+
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_freq;
+
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ ret = clk_set_rate(clk, old_freq);
+ if (ret)
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply_vdd->u_volt) {
+ ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ return ret;
+
+ ret = _opp_set_voltage(dev, old_supply_vdd, 0, vdd_reg,
+ "vdd");
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct ti_opp_supply_of_data omap_generic_of_data = {
+};
+
+static const struct ti_opp_supply_of_data omap_omap5_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct ti_opp_supply_of_data omap_omap5core_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE | OPPDM_HAS_NO_ABB,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct of_device_id ti_opp_supply_of_match[] = {
+ {.compatible = "ti,omap-opp-supply", .data = &omap_generic_of_data},
+ {.compatible = "ti,omap5-opp-supply", .data = &omap_omap5_of_data},
+ {.compatible = "ti,omap5-core-opp-supply",
+ .data = &omap_omap5core_of_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_opp_supply_of_match);
+
+static int ti_opp_supply_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev = get_cpu_device(0);
+ const struct of_device_id *match;
+ const struct ti_opp_supply_of_data *of_data;
+ int ret = 0;
+
+ match = of_match_device(ti_opp_supply_of_match, dev);
+ if (!match) {
+ /* We do not expect this to happen */
+ dev_err(dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+ if (!match->data) {
+ /* Again, unlikely... but mistakes do happen */
+ dev_err(dev, "%s: Bad data in match\n", __func__);
+ return -EINVAL;
+ }
+ of_data = match->data;
+
+ dev_set_drvdata(dev, (void *)of_data);
+
+ /* If we need optimized voltage */
+ if (of_data->flags & OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE) {
+ ret = _store_optimized_voltages(dev, &opp_data);
+ if (ret)
+ return ret;
+ }
+
+ ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
+ ti_opp_supply_set_opp));
+ if (ret)
+ _free_optimized_voltages(dev, &opp_data);
+
+ return ret;
+}
+
+static struct platform_driver ti_opp_supply_driver = {
+ .probe = ti_opp_supply_probe,
+ .driver = {
+ .name = "ti_opp_supply",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+ },
+};
+module_platform_driver(ti_opp_supply_driver);
+
+MODULE_DESCRIPTION("Texas Instruments OMAP OPP Supply driver");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
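The set_opp helper above keeps the usual DVFS ordering: raise voltage before
frequency when scaling up, lower it after frequency when scaling down. The
regulator call it leans on, regulator_set_voltage_triplet(), takes a
preferred target but lets the core settle anywhere in [min, max], which is
why the driver can widen max to the absolute rail limit. A minimal sketch
with hypothetical values:

	/* Prefer 1.12 V, accept 1.06 V up to the 1.5 V rail cap. */
	ret = regulator_set_voltage_triplet(reg, 1060000, 1120000, 1500000);
	if (ret)
		dev_err(dev, "failed to set voltage: %d\n", ret);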
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 14fd865a5120..5958c8dda4e3 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
pm_generic_complete(dev);
/* Resume device if platform firmware has put it in reset-power-on */
- if (dev->power.direct_complete && pm_resume_via_firmware()) {
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
pci_power_t pre_sleep_state = pci_dev->current_state;
pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * pci_pm_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
+
return 0;
}
@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
@@ -953,7 +968,7 @@ static int pci_pm_freeze_late(struct device *dev)
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- return pm_generic_freeze_late(dev);;
+ return pm_generic_freeze_late(dev);
}
static int pci_pm_freeze_noirq(struct device *dev)
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ffbf4e723527..fb1c1bb87316 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -150,6 +150,9 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 730cc897b94d..fab143a17f9c 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -510,11 +510,11 @@ out:
return -EBADMSG;
}
-static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
+static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(filp, &stuser->comp.wait, wait);
poll_wait(filp, &stdev->event_wq, wait);
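The switchtec change is part of the tree-wide __poll_t conversion: poll
methods return the sparse-annotated __poll_t and build their masks from the
EPOLL* constants. A minimal sketch of the pattern (names hypothetical):

	static __poll_t my_dev_poll(struct file *filp, poll_table *wait)
	{
		__poll_t mask = 0;

		poll_wait(filp, &my_waitqueue, wait);	/* hypothetical queue */
		if (my_data_ready())			/* hypothetical check */
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}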
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index b8f44b068fc6..da5724cd89cf 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -17,6 +17,15 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config ARM_DSU_PMU
+ tristate "ARM DynamIQ Shared Unit (DSU) PMU"
+ depends on ARM64
+ help
+ Provides support for the performance monitor unit in the ARM
+ DynamIQ Shared Unit (DSU). The DSU integrates one or more cores
+ with an L3 memory system, control logic and external interfaces
+ to form a multicore cluster. The PMU allows counting various
+ events related to the DSU.
+
config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 710a0135bd61..c2f27419bdf0 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
new file mode 100644
index 000000000000..93c50e377507
--- /dev/null
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -0,0 +1,843 @@
+/*
+ * ARM DynamIQ Shared Unit (DSU) PMU driver
+ *
+ * Copyright (C) ARM Limited, 2017.
+ *
+ * Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#define PMUNAME "arm_dsu"
+#define DRVNAME PMUNAME "_pmu"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <asm/arm_dsu_pmu.h>
+#include <asm/local64.h>
+
+/* PMU event codes */
+#define DSU_PMU_EVT_CYCLES 0x11
+#define DSU_PMU_EVT_CHAIN 0x1e
+
+#define DSU_PMU_MAX_COMMON_EVENTS 0x40
+
+#define DSU_PMU_MAX_HW_CNTRS 32
+#define DSU_PMU_HW_COUNTER_MASK (DSU_PMU_MAX_HW_CNTRS - 1)
+
+#define CLUSTERPMCR_E BIT(0)
+#define CLUSTERPMCR_P BIT(1)
+#define CLUSTERPMCR_C BIT(2)
+#define CLUSTERPMCR_N_SHIFT 11
+#define CLUSTERPMCR_N_MASK 0x1f
+#define CLUSTERPMCR_IDCODE_SHIFT 16
+#define CLUSTERPMCR_IDCODE_MASK 0xff
+#define CLUSTERPMCR_IMP_SHIFT 24
+#define CLUSTERPMCR_IMP_MASK 0xff
+#define CLUSTERPMCR_RES_MASK 0x7e8
+#define CLUSTERPMCR_RES_VAL 0x40
+
+#define DSU_ACTIVE_CPU_MASK 0x0
+#define DSU_ASSOCIATED_CPU_MASK 0x1
+
+/*
+ * We use the index of the counters as they appear in the counter
+ * bitmaps in the PMU registers (e.g. CLUSTERPMSELR),
+ * i.e.:
+ * counter 0 - Bit 0
+ * counter 1 - Bit 1
+ * ...
+ * Cycle counter - Bit 31
+ */
+#define DSU_PMU_IDX_CYCLE_COUNTER 31
+
+/* All event counters are 32bit, with a 64bit Cycle counter */
+#define DSU_PMU_COUNTER_WIDTH(idx) \
+ (((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32)
+
+#define DSU_PMU_COUNTER_MASK(idx) \
+ GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0)
+
+#define DSU_EXT_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { \
+ .attr = __ATTR(_name, 0444, _func, NULL), \
+ .var = (void *)_config \
+ } \
+ })[0].attr.attr)
+
+#define DSU_EVENT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)
+
+#define DSU_FORMAT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)
+
+#define DSU_CPUMASK_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)
+
+struct dsu_hw_events {
+ DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS);
+ struct perf_event *events[DSU_PMU_MAX_HW_CNTRS];
+};
+
+/*
+ * struct dsu_pmu - DSU PMU descriptor
+ *
+ * @pmu_lock : Protects accesses to DSU PMU registers from normal vs
+ * interrupt handler contexts.
+ * @hw_events : Holds the event counter state.
+ * @associated_cpus : CPUs attached to the DSU.
+ * @active_cpu : CPU to which the PMU is bound for accesses.
+ * @cpuhp_node : Node for CPU hotplug notifier link.
+ * @num_counters : Number of event counters implemented by the PMU,
+ * excluding the cycle counter.
+ * @irq : Interrupt line for counter overflow.
+ * @cpmceid_bitmap : Bitmap for the availability of architected common
+ * events (event_code < 0x40).
+ */
+struct dsu_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ raw_spinlock_t pmu_lock;
+ struct dsu_hw_events hw_events;
+ cpumask_t associated_cpus;
+ cpumask_t active_cpu;
+ struct hlist_node cpuhp_node;
+ s8 num_counters;
+ int irq;
+ DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS);
+};
+
+static unsigned long dsu_pmu_cpuhp_state;
+
+static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
+{
+ return container_of(pmu, struct dsu_pmu, pmu);
+}
+
+static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
+ (unsigned long)eattr->var);
+}
+
+static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t dsu_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ unsigned long mask_id = (unsigned long)eattr->var;
+ const cpumask_t *cpumask;
+
+ switch (mask_id) {
+ case DSU_ACTIVE_CPU_MASK:
+ cpumask = &dsu_pmu->active_cpu;
+ break;
+ case DSU_ASSOCIATED_CPU_MASK:
+ cpumask = &dsu_pmu->associated_cpus;
+ break;
+ default:
+ return 0;
+ }
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
+}
+
+static struct attribute *dsu_pmu_format_attrs[] = {
+ DSU_FORMAT_ATTR(event, "config:0-31"),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = dsu_pmu_format_attrs,
+};
+
+static struct attribute *dsu_pmu_event_attrs[] = {
+ DSU_EVENT_ATTR(cycles, 0x11),
+ DSU_EVENT_ATTR(bus_access, 0x19),
+ DSU_EVENT_ATTR(memory_error, 0x1a),
+ DSU_EVENT_ATTR(bus_cycles, 0x1d),
+ DSU_EVENT_ATTR(l3d_cache_allocate, 0x29),
+ DSU_EVENT_ATTR(l3d_cache_refill, 0x2a),
+ DSU_EVENT_ATTR(l3d_cache, 0x2b),
+ DSU_EVENT_ATTR(l3d_cache_wb, 0x2c),
+ NULL,
+};
+
+static umode_t
+dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int unused)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr.attr);
+ unsigned long evt = (unsigned long)eattr->var;
+
+ return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
+}
+
+static const struct attribute_group dsu_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = dsu_pmu_event_attrs,
+ .is_visible = dsu_pmu_event_attr_is_visible,
+};
+
+static struct attribute *dsu_pmu_cpumask_attrs[] = {
+ DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK),
+ DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_cpumask_attr_group = {
+ .attrs = dsu_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *dsu_pmu_attr_groups[] = {
+ &dsu_pmu_cpumask_attr_group,
+ &dsu_pmu_events_attr_group,
+ &dsu_pmu_format_attr_group,
+ NULL,
+};
+
+static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
+{
+ struct cpumask online_supported;
+
+ cpumask_and(&online_supported,
+ &dsu_pmu->associated_cpus, cpu_online_mask);
+ return cpumask_any_but(&online_supported, cpu);
+}
+
+static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
+{
+ return (idx < dsu_pmu->num_counters) ||
+ (idx == DSU_PMU_IDX_CYCLE_COUNTER);
+}
+
+static inline u64 dsu_pmu_read_counter(struct perf_event *event)
+{
+ u64 val;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return 0;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying to read invalid counter %d\n", idx);
+ return 0;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ val = __dsu_pmu_read_pmccntr();
+ else
+ val = __dsu_pmu_read_counter(idx);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+
+ return val;
+}
+
+static void dsu_pmu_write_counter(struct perf_event *event, u64 val)
+{
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "writing to invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ __dsu_pmu_write_pmccntr(val);
+ else
+ __dsu_pmu_write_counter(idx, val);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ int idx;
+ unsigned long evtype = event->attr.config;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ unsigned long *used_mask = hw_events->used_mask;
+
+ if (evtype == DSU_PMU_EVT_CYCLES) {
+ if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask))
+ return -EAGAIN;
+ return DSU_PMU_IDX_CYCLE_COUNTER;
+ }
+
+ idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
+ if (idx >= dsu_pmu->num_counters)
+ return -EAGAIN;
+ set_bit(idx, hw_events->used_mask);
+ return idx;
+}
+
+static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_counter_interrupt_enable(idx);
+ __dsu_pmu_enable_counter(idx);
+}
+
+static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_disable_counter(idx);
+ __dsu_pmu_counter_interrupt_disable(idx);
+}
+
+static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
+ struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ unsigned long flags;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying to set invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ __dsu_pmu_set_event(idx, event->hw.config_base);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_count, new_count;
+
+ do {
+ /* We may also be called from the irq handler */
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = dsu_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
+ prev_count);
+ delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
+ local64_add(delta, &event->count);
+}
+
+static void dsu_pmu_read(struct perf_event *event)
+{
+ dsu_pmu_event_update(event);
+}
+
+static inline u32 dsu_pmu_get_reset_overflow(void)
+{
+ return __dsu_pmu_get_reset_overflow();
+}
+
+/**
+ * dsu_pmu_set_event_period: Set the period for the counter.
+ *
+ * All DSU PMU event counters, except the cycle counter, are 32-bit
+ * counters. To handle cases of extreme interrupt latency, we program
+ * the counter with half of the max count for the counters.
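+ *
+ * For a 32-bit counter this means programming 0x7fffffff, i.e. 2^31
+ * counts of headroom before the overflow interrupt must be serviced;
+ * the 64-bit cycle counter likewise starts at 2^63 - 1.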
+ */
+static void dsu_pmu_set_event_period(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1;
+
+ local64_set(&event->hw.prev_count, val);
+ dsu_pmu_write_counter(event, val);
+}
+
+static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev)
+{
+ int i;
+ bool handled = false;
+ struct dsu_pmu *dsu_pmu = dev;
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ unsigned long overflow;
+
+ overflow = dsu_pmu_get_reset_overflow();
+ if (!overflow)
+ return IRQ_NONE;
+
+ for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) {
+ struct perf_event *event = hw_events->events[i];
+
+ if (!event)
+ continue;
+ dsu_pmu_event_update(event);
+ dsu_pmu_set_event_period(event);
+ handled = true;
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void dsu_pmu_start(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ /* We always reprogram the counter */
+ if (pmu_flags & PERF_EF_RELOAD)
+ WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
+ dsu_pmu_set_event_period(event);
+ if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER)
+ dsu_pmu_set_event(dsu_pmu, event);
+ event->hw.state = 0;
+ dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
+}
+
+static void dsu_pmu_stop(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+ dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
+ dsu_pmu_event_update(event);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dsu_pmu_add(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return -ENOENT;
+
+ idx = dsu_pmu_get_event_idx(hw_events, event);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ hw_events->events[idx] = event;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ dsu_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ return 0;
+}
+
+static void dsu_pmu_del(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ dsu_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+ perf_event_update_userpage(event);
+}
+
+static void dsu_pmu_enable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ /* If no counters are added, skip enabling the PMU */
+ if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
+ return;
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr |= CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_disable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr &= ~CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static bool dsu_pmu_validate_event(struct pmu *pmu,
+ struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ if (is_software_event(event))
+ return true;
+ /* Reject groups spanning multiple HW PMUs. */
+ if (event->pmu != pmu)
+ return false;
+ return dsu_pmu_get_event_idx(hw_events, event) >= 0;
+}
+
+/*
+ * Make sure the group of events can be scheduled at once
+ * on the PMU.
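+ *
+ * This is done by simulating counter allocation on a scratch
+ * used_mask (fake_hw) instead of the live hardware state.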
+ */
+static bool dsu_pmu_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct dsu_hw_events fake_hw;
+
+ if (event->group_leader == event)
+ return true;
+
+ memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
+ return false;
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
+ return false;
+ }
+ return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
+}
+
+static int dsu_pmu_event_init(struct perf_event *event)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* We don't support sampling */
+ if (is_sampling_event(event)) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* We cannot support task bound events */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
+ return -EINVAL;
+ }
+
+ if (has_branch_stack(event) ||
+ event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
+ dev_dbg(dsu_pmu->pmu.dev,
+ "Requested cpu is not associated with the DSU\n");
+ return -EINVAL;
+ }
+ /*
+ * Choose the current active CPU to read the events. We don't want
+ * to migrate the event contexts, irq handling etc to the requested
+ * CPU. As long as the requested CPU is within the same DSU, we
+ * are fine.
+ */
+ event->cpu = cpumask_first(&dsu_pmu->active_cpu);
+ if (event->cpu >= nr_cpu_ids)
+ return -EINVAL;
+ if (!dsu_pmu_validate_group(event))
+ return -EINVAL;
+
+ event->hw.config_base = event->attr.config;
+ return 0;
+}
+
+static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu;
+
+ dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
+ if (!dsu_pmu)
+ return ERR_PTR(-ENOMEM);
+
+ raw_spin_lock_init(&dsu_pmu->pmu_lock);
+ /*
+ * Initialise the number of counters to -1, until we probe
+ * the real number on a connected CPU.
+ */
+ dsu_pmu->num_counters = -1;
+ return dsu_pmu;
+}
+
+/**
+ * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster.
+ */
+static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
+{
+ int i = 0, n, cpu;
+ struct device_node *cpu_node;
+
+ n = of_count_phandle_with_args(dev, "cpus", NULL);
+ if (n <= 0)
+ return -ENODEV;
+ for (; i < n; i++) {
+ cpu_node = of_parse_phandle(dev, "cpus", i);
+ if (!cpu_node)
+ break;
+ cpu = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+ /*
+ * We have to ignore the failures here and continue scanning
+ * the list to handle cases where the nr_cpus could be capped
+ * in the running kernel.
+ */
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, mask);
+ }
+ return 0;
+}
+
+/*
+ * dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
+ */
+static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
+{
+ u64 num_counters;
+ u32 cpmceid[2];
+
+ num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) &
+ CLUSTERPMCR_N_MASK;
+ /* We can only support up to 31 independent counters */
+ if (WARN_ON(num_counters > 31))
+ num_counters = 31;
+ dsu_pmu->num_counters = num_counters;
+ if (!dsu_pmu->num_counters)
+ return;
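+
+ /*
+ * Cache the two common-event ID registers as one bitmap, so the
+ * set of supported common events can be consulted without
+ * touching the hardware again.
+ */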
+ cpmceid[0] = __dsu_pmu_read_pmceid(0);
+ cpmceid[1] = __dsu_pmu_read_pmceid(1);
+ bitmap_from_u32array(dsu_pmu->cpmceid_bitmap,
+ DSU_PMU_MAX_COMMON_EVENTS,
+ cpmceid,
+ ARRAY_SIZE(cpmceid));
+}
+
+static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
+{
+ cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
+ if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu))
+ pr_warn("Failed to set irq affinity to %d\n", cpu);
+}
+
+/*
+ * dsu_pmu_init_pmu: Initialise the DSU PMU configuration if
+ * we haven't done it already.
+ */
+static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
+{
+ if (dsu_pmu->num_counters == -1)
+ dsu_pmu_probe_pmu(dsu_pmu);
+ /* Reset the interrupt overflow mask */
+ dsu_pmu_get_reset_overflow();
+}
+
+static int dsu_pmu_device_probe(struct platform_device *pdev)
+{
+ int irq, rc;
+ struct dsu_pmu *dsu_pmu;
+ char *name;
+ static atomic_t pmu_idx = ATOMIC_INIT(-1);
+
+ dsu_pmu = dsu_pmu_alloc(pdev);
+ if (IS_ERR(dsu_pmu))
+ return PTR_ERR(dsu_pmu);
+
+ rc = dsu_pmu_dt_get_cpus(pdev->dev.of_node, &dsu_pmu->associated_cpus);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
+ return rc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Failed to find IRQ\n");
+ return -EINVAL;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
+ PMUNAME, atomic_inc_return(&pmu_idx));
+ if (!name)
+ return -ENOMEM;
+ rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq,
+ IRQF_NOBALANCING, name, dsu_pmu);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq);
+ return rc;
+ }
+
+ dsu_pmu->irq = irq;
+ platform_set_drvdata(pdev, dsu_pmu);
+ rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ if (rc)
+ return rc;
+
+ dsu_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .module = THIS_MODULE,
+ .pmu_enable = dsu_pmu_enable,
+ .pmu_disable = dsu_pmu_disable,
+ .event_init = dsu_pmu_event_init,
+ .add = dsu_pmu_add,
+ .del = dsu_pmu_del,
+ .start = dsu_pmu_start,
+ .stop = dsu_pmu_stop,
+ .read = dsu_pmu_read,
+
+ .attr_groups = dsu_pmu_attr_groups,
+ };
+
+ rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
+ if (rc) {
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ }
+
+ return rc;
+}
+
+static int dsu_pmu_device_remove(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&dsu_pmu->pmu);
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id dsu_pmu_of_match[] = {
+ { .compatible = "arm,dsu-pmu", },
+ {},
+};
+
+static struct platform_driver dsu_pmu_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(dsu_pmu_of_match),
+ },
+ .probe = dsu_pmu_device_probe,
+ .remove = dsu_pmu_device_remove,
+};
+
+static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
+ return 0;
+
+ /* If the PMU is already managed, there is nothing to do */
+ if (!cpumask_empty(&dsu_pmu->active_cpu))
+ return 0;
+
+ dsu_pmu_init_pmu(dsu_pmu);
+ dsu_pmu_set_active_cpu(cpu, dsu_pmu);
+
+ return 0;
+}
+
+static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ int dst;
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
+ return 0;
+
+ dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
+ /* If there are no active CPUs in the DSU, leave IRQ disabled */
+ if (dst >= nr_cpu_ids) {
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
+ dsu_pmu_set_active_cpu(dst, dsu_pmu);
+
+ return 0;
+}
+
+static int __init dsu_pmu_init(void)
+{
+ int ret;
+
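+ /*
+ * Register a dynamically allocated multi-instance hotplug state;
+ * the returned state number is saved so that probe/remove can
+ * attach and detach per-PMU instances to it.
+ */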
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DRVNAME,
+ dsu_pmu_cpu_online,
+ dsu_pmu_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ dsu_pmu_cpuhp_state = ret;
+ return platform_driver_register(&dsu_pmu_driver);
+}
+
+static void __exit dsu_pmu_exit(void)
+{
+ platform_driver_unregister(&dsu_pmu_driver);
+ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
+}
+
+module_init(dsu_pmu_init);
+module_exit(dsu_pmu_exit);
+
+MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
+MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
+MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 91b224eced18..46501cc79fd7 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -82,19 +82,10 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
return -EINVAL;
}
- /* Now look up the logical CPU number */
- for_each_possible_cpu(cpu) {
- struct device_node *cpu_dn;
-
- cpu_dn = of_cpu_device_node_get(cpu);
- of_node_put(cpu_dn);
-
- if (dn == cpu_dn)
- break;
- }
-
- if (cpu >= nr_cpu_ids) {
+ cpu = of_cpu_node_to_id(dn);
+ if (cpu < 0) {
pr_warn("failed to find logical CPU for %s\n", dn->name);
+ cpu = nr_cpu_ids;
}
of_node_put(dn);
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8ce262fc2561..51b40aecb776 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1164,6 +1164,15 @@ static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
struct arm_spe_pmu *spe_pmu;
struct device *dev = &pdev->dev;
+ /*
+ * If kernelspace is unmapped when running at EL0, then the SPE
+ * buffer will fault and prematurely terminate the AUX session.
+ */
+ if (arm64_kernel_unmapped_at_el0()) {
+ dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
+ return -EPERM;
+ }
+
spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
if (!spe_pmu) {
dev_err(dev, "failed to allocate spe_pmu\n");
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 3f953db70288..8708ea3b4d6d 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -150,6 +150,9 @@ enum sata_phy_regs {
TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK = 0x3ff,
TXPMD_TX_FREQ_CTRL_CONTROL3 = 0x84,
TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
+
+ RXPMD_REG_BANK = 0x1c0,
+ RXPMD_RX_FREQ_MON_CONTROL1 = 0x87,
};
enum sata_phy_ctrl_regs {
@@ -505,8 +508,36 @@ static int brcm_sata_phy_init(struct phy *phy)
return rc;
}
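+/*
+ * Calibration on the STB PHYs is a read-modify-write that sets bit 8
+ * of the RX frequency monitor control register.
+ */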
+static void brcm_stb_sata_calibrate(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp = BIT(8);
+
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ ~tmp, tmp);
+}
+
+static int brcm_sata_phy_calibrate(struct phy *phy)
+{
+ struct brcm_sata_port *port = phy_get_drvdata(phy);
+ int rc = -EOPNOTSUPP;
+
+ switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_28NM:
+ case BRCM_SATA_PHY_STB_40NM:
+ brcm_stb_sata_calibrate(port);
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static const struct phy_ops phy_ops = {
.init = brcm_sata_phy_init,
+ .calibrate = brcm_sata_phy_calibrate,
.owner = THIS_MODULE,
};
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 4571cc098b76..ce126955212c 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -63,6 +63,16 @@ config PINCTRL_AS3722
open drain configuration for the GPIO pins of AS3722 devices. It also
supports the GPIO functionality through gpiolib.
+config PINCTRL_AXP209
+ tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
+ depends on MFD_AXP20X
+ help
+ AXP PMICs provide multiple GPIOs that can be muxed for different
+ functions. This driver bundles a pinctrl driver to select the function
+ muxing and a GPIO driver to handle the GPIO when the GPIO function is
+ selected.
+ Say yes to enable pinctrl and GPIO support for the AXP209 PMIC.
+
config PINCTRL_BF54x
def_bool y if BF54x
select PINCTRL_ADI2
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index d0d4844f8022..4777f1595ce2 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o
obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
+obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
new file mode 100644
index 000000000000..22d3bb0bf927
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -0,0 +1,476 @@
+/*
+ * AXP20x pinctrl and GPIO driver
+ *
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2017 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXP20X_GPIO_FUNCTIONS 0x7
+#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
+#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
+#define AXP20X_GPIO_FUNCTION_INPUT 2
+
+#define AXP20X_FUNC_GPIO_OUT 0
+#define AXP20X_FUNC_GPIO_IN 1
+#define AXP20X_FUNC_LDO 2
+#define AXP20X_FUNC_ADC 3
+#define AXP20X_FUNCS_NB 4
+
+#define AXP20X_MUX_GPIO_OUT 0
+#define AXP20X_MUX_GPIO_IN BIT(1)
+#define AXP20X_MUX_ADC BIT(2)
+
+#define AXP813_MUX_ADC (BIT(2) | BIT(0))
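+
+/*
+ * The mux values above are the raw values written to the 3-bit
+ * function field of the GPIOx control registers: 0/1 select output
+ * (driving low/high), 2 selects input, and the ADC encoding differs
+ * between AXP20x (4) and AXP813 (5).
+ */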
+
+struct axp20x_pctrl_desc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ /* Stores the pins supporting LDO function. Bit offset is pin number. */
+ u8 ldo_mask;
+ /* Stores the pins supporting ADC function. Bit offset is pin number. */
+ u8 adc_mask;
+ u8 gpio_status_offset;
+ u8 adc_mux;
+};
+
+struct axp20x_pinctrl_function {
+ const char *name;
+ unsigned int muxval;
+ const char **groups;
+ unsigned int ngroups;
+};
+
+struct axp20x_pctl {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+ struct pinctrl_dev *pctl_dev;
+ struct device *dev;
+ const struct axp20x_pctrl_desc *desc;
+ struct axp20x_pinctrl_function funcs[AXP20X_FUNCS_NB];
+};
+
+static const struct pinctrl_pin_desc axp209_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+ PINCTRL_PIN(2, "GPIO2"),
+};
+
+static const struct pinctrl_pin_desc axp813_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+};
+
+static const struct axp20x_pctrl_desc axp20x_data = {
+ .pins = axp209_pins,
+ .npins = ARRAY_SIZE(axp209_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 4,
+ .adc_mux = AXP20X_MUX_ADC,
+};
+
+static const struct axp20x_pctrl_desc axp813_data = {
+ .pins = axp813_pins,
+ .npins = ARRAY_SIZE(axp813_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0),
+ .gpio_status_offset = 0,
+ .adc_mux = AXP813_MUX_ADC,
+};
+
+static int axp20x_gpio_get_reg(unsigned int offset)
+{
+ switch (offset) {
+ case 0:
+ return AXP20X_GPIO0_CTRL;
+ case 1:
+ return AXP20X_GPIO1_CTRL;
+ case 2:
+ return AXP20X_GPIO2_CTRL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_gpio_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int axp20x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(pctl->regmap, AXP20X_GPIO20_SS, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset + pctl->desc->gpio_status_offset));
+}
+
+static int axp20x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int reg, ret;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(pctl->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Values above 2 shouldn't really happen if the pin is already
+ * in use; if it isn't in use yet, the value will be changed
+ * soon anyway, so it doesn't matter. Default to output.
+ */
+ if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
+ return 0;
+
+ /*
+ * The direction is encoded in the three lowest function values:
+ * 2 is input, 0 and 1 are output.
+ */
+ return val & 2;
+}
+
+static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ chip->set(chip, offset, value);
+
+ return 0;
+}
+
+static void axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return;
+
+ regmap_update_bits(pctl->regmap, reg,
+ AXP20X_GPIO_FUNCTIONS,
+ value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO_FUNCTION_OUT_LOW);
+}
+
+static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
+ u8 config)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(pctl->regmap, reg, AXP20X_GPIO_FUNCTIONS,
+ config);
+}
+
+static int axp20x_pmx_func_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ARRAY_SIZE(pctl->funcs);
+}
+
+static const char *axp20x_pmx_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->funcs[selector].name;
+}
+
+static int axp20x_pmx_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->funcs[selector].groups;
+ *num_groups = pctl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+static int axp20x_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int mask;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ if (function <= AXP20X_FUNC_GPIO_IN)
+ return axp20x_pmx_set(pctldev, group,
+ pctl->funcs[function].muxval);
+
+ if (function == AXP20X_FUNC_LDO)
+ mask = pctl->desc->ldo_mask;
+ else
+ mask = pctl->desc->adc_mask;
+
+ if (!(BIT(group) & mask))
+ return -EINVAL;
+
+ /*
+ * We let the regulator framework handle the LDO muxing as muxing bits
+ * are basically also regulators on/off bits. It's better not to enforce
+ * any state of the regulator when selecting LDO mux so that we don't
+ * interfere with the regulator driver.
+ */
+ if (function == AXP20X_FUNC_LDO)
+ return 0;
+
+ return axp20x_pmx_set(pctldev, group, pctl->funcs[function].muxval);
+}
+
+static int axp20x_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (input)
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval);
+
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval);
+}
+
+static const struct pinmux_ops axp20x_pmx_ops = {
+ .get_functions_count = axp20x_pmx_func_cnt,
+ .get_function_name = axp20x_pmx_func_name,
+ .get_function_groups = axp20x_pmx_func_groups,
+ .set_mux = axp20x_pmx_set_mux,
+ .gpio_set_direction = axp20x_pmx_gpio_set_direction,
+ .strict = true,
+};
+
+static int axp20x_groups_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->npins;
+}
+
+static int axp20x_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins, unsigned int *num_pins)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned int *)&pctl->desc->pins[selector];
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const char *axp20x_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->pins[selector].name;
+}
+
+static const struct pinctrl_ops axp20x_pctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = axp20x_groups_cnt,
+ .get_group_name = axp20x_group_name,
+ .get_group_pins = axp20x_group_pins,
+};
+
+static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ unsigned int mask_len,
+ struct axp20x_pinctrl_function *func,
+ const struct pinctrl_pin_desc *pins)
+{
+ unsigned long mask_cpy = mask;
+ const char **group;
+ unsigned int ngroups = hweight8(mask);
+ int bit;
+
+ func->ngroups = ngroups;
+ if (func->ngroups > 0) {
+ func->groups = devm_kzalloc(dev, ngroups * sizeof(const char *),
+ GFP_KERNEL);
+ if (!func->groups) {
+ func->ngroups = 0;
+ return;
+ }
+ group = func->groups;
+ for_each_set_bit(bit, &mask_cpy, mask_len) {
+ *group = pins[bit].name;
+ group++;
+ }
+ }
+}
+
+static void axp20x_build_funcs_groups(struct platform_device *pdev)
+{
+ struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
+ int i, pin, npins = pctl->desc->npins;
+
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].name = "gpio_in";
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval = AXP20X_MUX_GPIO_IN;
+ pctl->funcs[AXP20X_FUNC_LDO].name = "ldo";
+ /*
+ * Muxval for LDO is useless as we won't use it.
+ * See comment in axp20x_pmx_set_mux.
+ */
+ pctl->funcs[AXP20X_FUNC_ADC].name = "adc";
+ pctl->funcs[AXP20X_FUNC_ADC].muxval = pctl->desc->adc_mux;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ for (i = 0; i <= AXP20X_FUNC_GPIO_IN; i++) {
+ pctl->funcs[i].ngroups = npins;
+ pctl->funcs[i].groups = devm_kzalloc(&pdev->dev,
+ npins * sizeof(char *),
+ GFP_KERNEL);
+ if (!pctl->funcs[i].groups) {
+ pctl->funcs[i].ngroups = 0;
+ continue;
+ }
+ for (pin = 0; pin < npins; pin++)
+ pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
+ }
+
+ axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_LDO],
+ pctl->desc->pins);
+
+ axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_ADC],
+ pctl->desc->pins);
+}
+
+static const struct of_device_id axp20x_pctl_match[] = {
+ { .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axp20x_pctl_match);
+
+static int axp20x_pctl_probe(struct platform_device *pdev)
+{
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct axp20x_pctl *pctl;
+ struct device *dev = &pdev->dev;
+ struct pinctrl_desc *pctrl_desc;
+ int ret;
+
+ if (!of_device_is_available(pdev->dev.of_node))
+ return -ENODEV;
+
+ if (!axp20x) {
+ dev_err(&pdev->dev, "Parent drvdata not set\n");
+ return -EINVAL;
+ }
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ /* Set desc before the gpio_chip: chip.ngpio reads desc->npins. */
+ pctl->desc = of_device_get_match_data(dev);
+ pctl->regmap = axp20x->regmap;
+ pctl->dev = &pdev->dev;
+
+ pctl->chip.base = -1;
+ pctl->chip.can_sleep = true;
+ pctl->chip.request = gpiochip_generic_request;
+ pctl->chip.free = gpiochip_generic_free;
+ pctl->chip.parent = &pdev->dev;
+ pctl->chip.label = dev_name(&pdev->dev);
+ pctl->chip.owner = THIS_MODULE;
+ pctl->chip.get = axp20x_gpio_get;
+ pctl->chip.get_direction = axp20x_gpio_get_direction;
+ pctl->chip.set = axp20x_gpio_set;
+ pctl->chip.direction_input = axp20x_gpio_input;
+ pctl->chip.direction_output = axp20x_gpio_output;
+ pctl->chip.ngpio = pctl->desc->npins;
+
+ platform_set_drvdata(pdev, pctl);
+
+ axp20x_build_funcs_groups(pdev);
+
+ pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
+ if (!pctrl_desc)
+ return -ENOMEM;
+
+ pctrl_desc->name = dev_name(&pdev->dev);
+ pctrl_desc->owner = THIS_MODULE;
+ pctrl_desc->pins = pctl->desc->pins;
+ pctrl_desc->npins = pctl->desc->npins;
+ pctrl_desc->pctlops = &axp20x_pctrl_ops;
+ pctrl_desc->pmxops = &axp20x_pmx_ops;
+
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
+ if (IS_ERR(pctl->pctl_dev)) {
+ dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+ return PTR_ERR(pctl->pctl_dev);
+ }
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &pctl->chip, pctl);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPIO chip\n");
+ return ret;
+ }
+
+ ret = gpiochip_add_pin_range(&pctl->chip, dev_name(&pdev->dev),
+ pctl->desc->pins->number,
+ pctl->desc->pins->number,
+ pctl->desc->npins);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add pin range\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "AXP20x pinctrl and GPIO driver loaded\n");
+
+ return 0;
+}
+
+static struct platform_driver axp20x_pctl_driver = {
+ .probe = axp20x_pctl_probe,
+ .driver = {
+ .name = "axp20x-gpio",
+ .of_match_table = axp20x_pctl_match,
+ },
+};
+
+module_platform_driver(axp20x_pctl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20x PMIC pinctrl and GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 0ad6e290bbda..e728a96cabfd 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -38,14 +38,8 @@ config CHROMEOS_PSTORE
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_pstore.
-config CROS_EC_CHARDEV
- tristate "Chrome OS Embedded Controller userspace device interface"
- depends on MFD_CROS_EC
- ---help---
- This driver adds support to talk with the ChromeOS EC from userspace.
-
- If you have a supported Chromebook, choose Y or M here.
- The module will be called cros_ec_dev.
+config CROS_EC_CTL
+ tristate
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index a077b1f0211d..ff3b369911f0 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -2,10 +2,9 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
-cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
- cros_ec_lightbar.o cros_ec_vbc.o \
- cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \
+ cros_ec_vbc.o cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4cc66f405760..5473e602f7e0 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -29,9 +29,6 @@
#include <linux/slab.h>
#include <linux/wait.h>
-#include "cros_ec_dev.h"
-#include "cros_ec_debugfs.h"
-
#define LOG_SHIFT 14
#define LOG_SIZE (1 << LOG_SHIFT)
#define LOG_POLL_SEC 10
@@ -191,11 +188,11 @@ error:
return ret;
}
-static unsigned int cros_ec_console_log_poll(struct file *file,
+static __poll_t cros_ec_console_log_poll(struct file *file,
poll_table *wait)
{
struct cros_ec_debugfs *debug_info = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &debug_info->log_wq, wait);
@@ -390,6 +387,7 @@ remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
}
+EXPORT_SYMBOL(cros_ec_debugfs_init);
void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
{
@@ -399,3 +397,4 @@ void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
debugfs_remove_recursive(ec->debug_info->dir);
cros_ec_cleanup_console_log(ec->debug_info);
}
+EXPORT_SYMBOL(cros_ec_debugfs_remove);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.h b/drivers/platform/chrome/cros_ec_debugfs.h
deleted file mode 100644
index 1ff3a50aa1b8..000000000000
--- a/drivers/platform/chrome/cros_ec_debugfs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2015 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _DRV_CROS_EC_DEBUGFS_H_
-#define _DRV_CROS_EC_DEBUGFS_H_
-
-#include "cros_ec_dev.h"
-
-/* debugfs stuff */
-int cros_ec_debugfs_init(struct cros_ec_dev *ec);
-void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
-
-#endif /* _DRV_CROS_EC_DEBUGFS_H_ */
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index fd2b047a2748..6ea79d495aa2 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -33,8 +33,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include "cros_ec_dev.h"
-
/* Rate-limit the lightbar interface to prevent DoS. */
static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
@@ -414,6 +412,7 @@ error:
return ret;
}
+EXPORT_SYMBOL(lb_manual_suspend_ctrl);
int lb_suspend(struct cros_ec_dev *ec)
{
@@ -422,6 +421,7 @@ int lb_suspend(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND);
}
+EXPORT_SYMBOL(lb_suspend);
int lb_resume(struct cros_ec_dev *ec)
{
@@ -430,6 +430,7 @@ int lb_resume(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME);
}
+EXPORT_SYMBOL(lb_resume);
static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -622,3 +623,4 @@ struct attribute_group cros_ec_lightbar_attr_group = {
.attrs = __lb_cmds_attrs,
.is_visible = cros_ec_lightbar_attrs_are_visible,
};
+EXPORT_SYMBOL(cros_ec_lightbar_attr_group);
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf9973989..d6eebe872187 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -34,8 +34,6 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "cros_ec_dev.h"
-
/* Accessor functions */
static ssize_t show_ec_reboot(struct device *dev,
@@ -294,4 +292,7 @@ static struct attribute *__ec_attrs[] = {
struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
};
+EXPORT_SYMBOL(cros_ec_attr_group);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC control driver");
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 564a0d08c8bf..6d38e6b08334 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -135,3 +135,4 @@ struct attribute_group cros_ec_vbc_attr_group = {
.bin_attrs = cros_ec_vbc_bin_attrs,
.is_bin_visible = cros_ec_vbc_is_visible,
};
+EXPORT_SYMBOL(cros_ec_vbc_attr_group);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 0578d34eec3f..999f1152655a 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -536,10 +536,10 @@ static ssize_t goldfish_pipe_write(struct file *filp,
/* is_write */ 1);
}
-static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status;
poll_wait(filp, &pipe->wake_queue, wait);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 935121814c97..a4fabf9d75f3 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4124,7 +4124,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
if (kfifo_len(&sonypi_compat.fifo))
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 6505c97705e1..1b491690ce07 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -119,7 +119,7 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
if (key_code == KEY_RESERVED)
return;
if (pressed)
- pm_wakeup_event(&device->dev, 0);
+ pm_wakeup_dev_event(&device->dev, 0, button->suspended);
if (button->suspended)
return;
input_report_key(input, key_code, pressed?1:0);
@@ -185,6 +185,8 @@ static int surface_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_free_input;
+
+ device_init_wakeup(&device->dev, true);
dev_info(&device->dev,
"%s [%s]\n", name, acpi_device_bid(device));
return 0;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index e681140b85d8..077f334fdbae 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -581,10 +581,7 @@ static int __init pnpbios_thread_init(void)
init_completion(&unload_sem);
task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- return 0;
+ return PTR_ERR_OR_ZERO(task);
}
/* Start the kernel thread later: */
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f054cdddfef8..803666ae3635 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
-#include <linux/kallsyms.h>
#include "base.h"
static void quirk_awe32_add_ports(struct pnp_dev *dev,
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 75f63e38a8d1..ed2b109ae8fc 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -76,7 +76,7 @@ struct rockchip_iodomain_supply {
struct rockchip_iodomain {
struct device *dev;
struct regmap *grf;
- struct rockchip_iodomain_soc_data *soc_data;
+ const struct rockchip_iodomain_soc_data *soc_data;
struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
};
@@ -382,43 +382,43 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
- .data = (void *)&soc_data_rk3188
+ .data = &soc_data_rk3188
},
{
.compatible = "rockchip,rk3228-io-voltage-domain",
- .data = (void *)&soc_data_rk3228
+ .data = &soc_data_rk3228
},
{
.compatible = "rockchip,rk3288-io-voltage-domain",
- .data = (void *)&soc_data_rk3288
+ .data = &soc_data_rk3288
},
{
.compatible = "rockchip,rk3328-io-voltage-domain",
- .data = (void *)&soc_data_rk3328
+ .data = &soc_data_rk3328
},
{
.compatible = "rockchip,rk3368-io-voltage-domain",
- .data = (void *)&soc_data_rk3368
+ .data = &soc_data_rk3368
},
{
.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3368_pmu
+ .data = &soc_data_rk3368_pmu
},
{
.compatible = "rockchip,rk3399-io-voltage-domain",
- .data = (void *)&soc_data_rk3399
+ .data = &soc_data_rk3399
},
{
.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3399_pmu
+ .data = &soc_data_rk3399_pmu
},
{
.compatible = "rockchip,rv1108-io-voltage-domain",
- .data = (void *)&soc_data_rv1108
+ .data = &soc_data_rv1108
},
{
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rv1108_pmu
+ .data = &soc_data_rv1108_pmu
},
{ /* sentinel */ },
};
@@ -443,7 +443,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iod);
match = of_match_node(rockchip_iodomain_match, np);
- iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+ iod->soc_data = match->data;
parent = pdev->dev.parent;
if (parent && parent->of_node) {
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ca0de1a78e85..a102e74ab24e 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -98,15 +98,6 @@ config POWER_RESET_HISI
help
Reboot support for Hisilicon boards.
-config POWER_RESET_IMX
- bool "IMX6 power-off driver"
- depends on POWER_RESET && SOC_IMX6
- help
- This driver support power off external PMIC by PMIC_ON_REQ on i.mx6
- boards.If you want to use other pin to control external power,please
- say N here or disable in dts to make sure pm_power_off never be
- overwrote wrongly by this driver.
-
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index aeb65edb17b7..dcc92f5f7a37 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
-obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 31080c254124..0206cce328b3 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -68,7 +68,7 @@ struct shdwc_config {
};
struct shdwc {
- struct shdwc_config *cfg;
+ const struct shdwc_config *cfg;
void __iomem *at91_shdwc_base;
};
@@ -260,7 +260,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
- at91_shdwc->cfg = (struct shdwc_config *)(match->data);
+ at91_shdwc->cfg = match->data;
sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sclk))
diff --git a/drivers/power/reset/imx-snvs-poweroff.c b/drivers/power/reset/imx-snvs-poweroff.c
deleted file mode 100644
index ad6ce5020ea7..000000000000
--- a/drivers/power/reset/imx-snvs-poweroff.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Power off driver for i.mx6
- * Copyright (c) 2014, FREESCALE CORPORATION. All rights reserved.
- *
- * based on msm-poweroff.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-static void __iomem *snvs_base;
-
-static void do_imx_poweroff(void)
-{
- u32 value = readl(snvs_base);
-
- /* set TOP and DP_EN bit */
- writel(value | 0x60, snvs_base);
-}
-
-static int imx_poweroff_probe(struct platform_device *pdev)
-{
- snvs_base = of_iomap(pdev->dev.of_node, 0);
- if (!snvs_base) {
- dev_err(&pdev->dev, "failed to get memory\n");
- return -ENODEV;
- }
-
- pm_power_off = do_imx_poweroff;
- return 0;
-}
-
-static const struct of_device_id of_imx_poweroff_match[] = {
- { .compatible = "fsl,sec-v4.0-poweroff", },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_imx_poweroff_match);
-
-static struct platform_driver imx_poweroff_driver = {
- .probe = imx_poweroff_probe,
- .driver = {
- .name = "imx-snvs-poweroff",
- .of_match_table = of_match_ptr(of_imx_poweroff_match),
- },
-};
-
-static int __init imx_poweroff_init(void)
-{
- return platform_driver_register(&imx_poweroff_driver);
-}
-device_initcall(imx_poweroff_init);
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 4702efdfe466..01b8c71697cb 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -23,7 +23,7 @@
#include <linux/pm.h>
static void __iomem *msm_ps_hold;
-static int do_msm_restart(struct notifier_block *nb, unsigned long action,
+static int deassert_pshold(struct notifier_block *nb, unsigned long action,
void *data)
{
writel(0, msm_ps_hold);
@@ -33,14 +33,13 @@ static int do_msm_restart(struct notifier_block *nb, unsigned long action,
}
static struct notifier_block restart_nb = {
- .notifier_call = do_msm_restart,
+ .notifier_call = deassert_pshold,
.priority = 128,
};
static void do_msm_poweroff(void)
{
- /* TODO: Add poweroff capability */
- do_msm_restart(&restart_nb, 0, NULL);
+ deassert_pshold(&restart_nb, 0, NULL);
}
static int msm_restart_probe(struct platform_device *pdev)
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index 7549c7f74a3c..c03e96e6a041 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = {
},
};
module_platform_driver(zx_reboot_driver);
+
+MODULE_DESCRIPTION("ZTE SoCs reset driver");
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 4ebbcce45c48..5a76c6d343de 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
/* Enable backup battery charging */
- abx500_mask_and_set_register_interruptible(di->dev,
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG,
RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
- if (ret < 0)
+ if (ret < 0) {
dev_err(di->dev, "%s mask and set failed\n", __func__);
+ goto out;
+ }
if (is_ab8540(di->parent)) {
ret = abx500_mask_and_set_register_interruptible(di->dev,
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 38f4e87cf24d..0771f951b11f 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -159,7 +159,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_ac_power *power;
- struct axp_data *axp_data;
+ const struct axp_data *axp_data;
static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
NULL };
int i, irq, ret;
@@ -176,7 +176,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
- axp_data = (struct axp_data *)of_device_get_match_data(&pdev->dev);
+ axp_data = of_device_get_match_data(&pdev->dev);
if (axp_data->acin_adc) {
power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
@@ -230,10 +230,10 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
static const struct of_device_id axp20x_ac_power_match[] = {
{
.compatible = "x-powers,axp202-ac-power-supply",
- .data = (void *)&axp20x_data,
+ .data = &axp20x_data,
}, {
.compatible = "x-powers,axp221-ac-power-supply",
- .data = (void *)&axp22x_data,
+ .data = &axp22x_data,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index d51ebd1da65e..9bfbde15b07d 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -1,6 +1,7 @@
/*
* axp288_charger.c - X-power AXP288 PMIC Charger driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
* Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
*
@@ -98,28 +99,10 @@
#define CV_4200MV 4200 /* 4200mV */
#define CV_4350MV 4350 /* 4350mV */
-#define CC_200MA 200 /* 200mA */
-#define CC_600MA 600 /* 600mA */
-#define CC_800MA 800 /* 800mA */
-#define CC_1000MA 1000 /* 1000mA */
-#define CC_1600MA 1600 /* 1600mA */
-#define CC_2000MA 2000 /* 2000mA */
-
-#define ILIM_100MA 100 /* 100mA */
-#define ILIM_500MA 500 /* 500mA */
-#define ILIM_900MA 900 /* 900mA */
-#define ILIM_1500MA 1500 /* 1500mA */
-#define ILIM_2000MA 2000 /* 2000mA */
-#define ILIM_2500MA 2500 /* 2500mA */
-#define ILIM_3000MA 3000 /* 3000mA */
-
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
#define USB_HOST_EXTCON_HID "INT3496"
#define USB_HOST_EXTCON_NAME "INT3496:00"
-static const unsigned int cable_ids[] =
- { EXTCON_CHG_USB_SDP, EXTCON_CHG_USB_CDP, EXTCON_CHG_USB_DCP };
-
enum {
VBUS_OV_IRQ = 0,
CHARGE_DONE_IRQ,
@@ -139,7 +122,6 @@ struct axp288_chrg_info {
struct regmap_irq_chip_data *regmap_irqc;
int irq[CHRG_INTR_END];
struct power_supply *psy_usb;
- struct mutex lock;
/* OTG/Host mode */
struct {
@@ -152,18 +134,14 @@ struct axp288_chrg_info {
/* SDP/CDP/DCP USB charging cable notifications */
struct {
struct extcon_dev *edev;
- bool connected;
- enum power_supply_type chg_type;
- struct notifier_block nb[ARRAY_SIZE(cable_ids)];
+ struct notifier_block nb;
struct work_struct work;
} cable;
- int inlmt;
int cc;
int cv;
int max_cc;
int max_cv;
- int is_charger_enabled;
};
static inline int axp288_charger_set_cc(struct axp288_chrg_info *info, int cc)
@@ -220,51 +198,63 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv)
return ret;
}
-static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
- int inlmt)
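+/*
+ * Return the VBUS input current limit in microamps (the unit used by
+ * POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT), or a negative errno on a
+ * register read failure.
+ */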
+static int axp288_charger_get_vbus_inlmt(struct axp288_chrg_info *info)
{
- int ret;
unsigned int val;
- u8 reg_val;
+ int ret;
- /* Read in limit register */
ret = regmap_read(info->regmap, AXP20X_CHRG_BAK_CTRL, &val);
if (ret < 0)
- goto set_inlmt_fail;
-
- if (inlmt <= ILIM_100MA) {
- reg_val = CHRG_VBUS_ILIM_100MA;
- inlmt = ILIM_100MA;
- } else if (inlmt <= ILIM_500MA) {
- reg_val = CHRG_VBUS_ILIM_500MA;
- inlmt = ILIM_500MA;
- } else if (inlmt <= ILIM_900MA) {
- reg_val = CHRG_VBUS_ILIM_900MA;
- inlmt = ILIM_900MA;
- } else if (inlmt <= ILIM_1500MA) {
- reg_val = CHRG_VBUS_ILIM_1500MA;
- inlmt = ILIM_1500MA;
- } else if (inlmt <= ILIM_2000MA) {
- reg_val = CHRG_VBUS_ILIM_2000MA;
- inlmt = ILIM_2000MA;
- } else if (inlmt <= ILIM_2500MA) {
- reg_val = CHRG_VBUS_ILIM_2500MA;
- inlmt = ILIM_2500MA;
- } else {
- reg_val = CHRG_VBUS_ILIM_3000MA;
- inlmt = ILIM_3000MA;
+ return ret;
+
+ val >>= CHRG_VBUS_ILIM_BIT_POS;
+ switch (val) {
+ case CHRG_VBUS_ILIM_100MA:
+ return 100000;
+ case CHRG_VBUS_ILIM_500MA:
+ return 500000;
+ case CHRG_VBUS_ILIM_900MA:
+ return 900000;
+ case CHRG_VBUS_ILIM_1500MA:
+ return 1500000;
+ case CHRG_VBUS_ILIM_2000MA:
+ return 2000000;
+ case CHRG_VBUS_ILIM_2500MA:
+ return 2500000;
+ case CHRG_VBUS_ILIM_3000MA:
+ return 3000000;
+ default:
+ dev_warn(&info->pdev->dev, "Unknown ilim reg val: %u\n", val);
+ return 0;
}
+}
- reg_val = (val & ~CHRG_VBUS_ILIM_MASK)
- | (reg_val << CHRG_VBUS_ILIM_BIT_POS);
- ret = regmap_write(info->regmap, AXP20X_CHRG_BAK_CTRL, reg_val);
- if (ret >= 0)
- info->inlmt = inlmt;
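+/*
+ * Set the VBUS input current limit; inlmt is in microamps and is
+ * rounded down to the nearest supported hardware step (with a
+ * 100 mA floor).
+ */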
+static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
+ int inlmt)
+{
+ int ret;
+ u8 reg_val;
+
+ if (inlmt >= 3000000)
+ reg_val = CHRG_VBUS_ILIM_3000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2500000)
+ reg_val = CHRG_VBUS_ILIM_2500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2000000)
+ reg_val = CHRG_VBUS_ILIM_2000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 1500000)
+ reg_val = CHRG_VBUS_ILIM_1500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 900000)
+ reg_val = CHRG_VBUS_ILIM_900MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 500000)
+ reg_val = CHRG_VBUS_ILIM_500MA << CHRG_VBUS_ILIM_BIT_POS;
else
- dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
+ reg_val = CHRG_VBUS_ILIM_100MA << CHRG_VBUS_ILIM_BIT_POS;
+ ret = regmap_update_bits(info->regmap, AXP20X_CHRG_BAK_CTRL,
+ CHRG_VBUS_ILIM_MASK, reg_val);
+ if (ret < 0)
+ dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
-set_inlmt_fail:
return ret;
}
@@ -283,7 +273,6 @@ static int axp288_charger_vbus_path_select(struct axp288_chrg_info *info,
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 vbus path select %d\n", ret);
-
return ret;
}
@@ -292,9 +281,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
{
int ret;
- if ((int)enable == info->is_charger_enabled)
- return 0;
-
if (enable)
ret = regmap_update_bits(info->regmap, AXP20X_CHRG_CTRL1,
CHRG_CCCV_CHG_EN, CHRG_CCCV_CHG_EN);
@@ -303,8 +289,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
CHRG_CCCV_CHG_EN, 0);
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 enable charger %d\n", ret);
- else
- info->is_charger_enabled = enable;
return ret;
}
@@ -376,8 +360,6 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
int ret = 0;
int scaled_val;
- mutex_lock(&info->lock);
-
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
scaled_val = min(val->intval, info->max_cc);
@@ -393,11 +375,15 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
if (ret < 0)
dev_warn(&info->pdev->dev, "set charge voltage failed\n");
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_set_vbus_inlmt(info, val->intval);
+ if (ret < 0)
+ dev_warn(&info->pdev->dev, "set input current limit failed\n");
+ break;
default:
ret = -EINVAL;
}
- mutex_unlock(&info->lock);
return ret;
}
@@ -406,9 +392,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct axp288_chrg_info *info = power_supply_get_drvdata(psy);
- int ret = 0;
-
- mutex_lock(&info->lock);
+ int ret;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
@@ -419,7 +403,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_present(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_ONLINE:
@@ -430,7 +414,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_online(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -448,17 +432,17 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
val->intval = info->max_cv * 1000;
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
- val->intval = info->inlmt * 1000;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_get_vbus_inlmt(info);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
break;
default:
- ret = -EINVAL;
- goto psy_get_prop_fail;
+ return -EINVAL;
}
-psy_get_prop_fail:
- mutex_unlock(&info->lock);
- return ret;
+ return 0;
}
static int axp288_charger_property_is_writeable(struct power_supply *psy,
@@ -469,6 +453,7 @@ static int axp288_charger_property_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
ret = 1;
break;
default:
@@ -487,7 +472,7 @@ static enum power_supply_property axp288_usb_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
static const struct power_supply_desc axp288_charger_desc = {
@@ -565,99 +550,53 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
container_of(work, struct axp288_chrg_info, cable.work);
int ret, current_limit;
struct extcon_dev *edev = info->cable.edev;
- bool old_connected = info->cable.connected;
- enum power_supply_type old_chg_type = info->cable.chg_type;
+ unsigned int val;
+
+ ret = regmap_read(info->regmap, AXP20X_PWR_INPUT_STATUS, &val);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "Error reading status (%d)\n", ret);
+ return;
+ }
+
+ /* Offline? Disable charging and bail */
+ if (!(val & PS_STAT_VBUS_VALID)) {
+ dev_dbg(&info->pdev->dev, "USB charger disconnected\n");
+ axp288_charger_enable_charger(info, false);
+ power_supply_changed(info->psy_usb);
+ return;
+ }
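+
+ /*
+ * The current limits chosen below are in microamps and follow
+ * the usual USB defaults: 500 mA (SDP), 1.5 A (CDP), 2 A (DCP).
+ */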
/* Determine cable/charger type */
if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
+ dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
+ current_limit = 500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
+ dev_dbg(&info->pdev->dev, "USB CDP charger is connected\n");
+ current_limit = 1500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
- dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ dev_dbg(&info->pdev->dev, "USB DCP charger is connected\n");
+ current_limit = 2000000;
} else {
- if (old_connected)
- dev_dbg(&info->pdev->dev, "USB charger disconnected");
- info->cable.connected = false;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
- }
-
- /* Cable status changed */
- if (old_connected == info->cable.connected &&
- old_chg_type == info->cable.chg_type)
+ /* Charger type detection still in progress, bail. */
return;
-
- mutex_lock(&info->lock);
-
- if (info->cable.connected) {
- axp288_charger_enable_charger(info, false);
-
- switch (info->cable.chg_type) {
- case POWER_SUPPLY_TYPE_USB:
- current_limit = ILIM_500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_CDP:
- current_limit = ILIM_1500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_DCP:
- current_limit = ILIM_2000MA;
- break;
- default:
- /* Unknown */
- current_limit = 0;
- break;
- }
-
- /* Set vbus current limit first, then enable charger */
- ret = axp288_charger_set_vbus_inlmt(info, current_limit);
- if (ret == 0)
- axp288_charger_enable_charger(info, true);
- else
- dev_err(&info->pdev->dev,
- "error setting current limit (%d)", ret);
- } else {
- axp288_charger_enable_charger(info, false);
}
- mutex_unlock(&info->lock);
+ /* Set vbus current limit first, then enable charger */
+ ret = axp288_charger_set_vbus_inlmt(info, current_limit);
+ if (ret == 0)
+ axp288_charger_enable_charger(info, true);
+ else
+ dev_err(&info->pdev->dev,
+ "error setting current limit (%d)\n", ret);
power_supply_changed(info->psy_usb);
}
-/*
- * We need 3 copies of this, because there is no way to find out for which
- * cable id we are being called from the passed in arguments; and we must
- * have a separate nb for each extcon_register_notifier call.
- */
-static int axp288_charger_handle_cable0_evt(struct notifier_block *nb,
- unsigned long event, void *param)
+static int axp288_charger_handle_cable_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
{
struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[0]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable1_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[1]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable2_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[2]);
+ container_of(nb, struct axp288_chrg_info, cable.nb);
schedule_work(&info->cable.work);
return NOTIFY_OK;
}
@@ -785,6 +724,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return 0;
}
+static void axp288_charger_cancel_work(void *data)
+{
+ struct axp288_chrg_info *info = data;
+
+ cancel_work_sync(&info->otg.work);
+ cancel_work_sync(&info->cable.work);
+}
+
static int axp288_charger_probe(struct platform_device *pdev)
{
int ret, i, pirq;
@@ -799,8 +746,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->pdev = pdev;
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
- info->cable.chg_type = -1;
- info->is_charger_enabled = -1;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
if (info->cable.edev == NULL) {
@@ -820,7 +765,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- mutex_init(&info->lock);
ret = charger_init_hw_regs(info);
if (ret)
@@ -836,19 +780,19 @@ static int axp288_charger_probe(struct platform_device *pdev)
return ret;
}
+ /* Cancel our work on cleanup, register this before the notifiers */
+ ret = devm_add_action(dev, axp288_charger_cancel_work, info);
+ if (ret)
+ return ret;
+
/* Register for extcon notification */
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
- info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
- info->cable.nb[1].notifier_call = axp288_charger_handle_cable1_evt;
- info->cable.nb[2].notifier_call = axp288_charger_handle_cable2_evt;
- for (i = 0; i < ARRAY_SIZE(cable_ids); i++) {
- ret = devm_extcon_register_notifier(dev, info->cable.edev,
- cable_ids[i], &info->cable.nb[i]);
- if (ret) {
- dev_err(dev, "failed to register extcon notifier for %u: %d\n",
- cable_ids[i], ret);
- return ret;
- }
+ info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
+ ret = devm_extcon_register_notifier_all(dev, info->cable.edev,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(dev, "failed to register cable extcon notifier\n");
+ return ret;
}
schedule_work(&info->cable.work);
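The charger hunks above collapse three per-cable-id notifier callbacks into a single one registered with devm_extcon_register_notifier_all(), and register a devm action *before* the notifiers so that, on devres unwind, the work is cancelled only after the notifiers are gone. A minimal sketch of that pattern, with hypothetical my_* names standing in for the driver's own:

#include <linux/device.h>
#include <linux/extcon.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

struct my_info {
	struct extcon_dev *edev;
	struct notifier_block nb;
	struct work_struct work;
};

static void my_work_fn(struct work_struct *work)
{
	/* cable/charger state handling would go here */
}

static int my_cable_evt(struct notifier_block *nb,
			unsigned long event, void *param)
{
	struct my_info *info = container_of(nb, struct my_info, nb);

	schedule_work(&info->work);	/* defer to process context */
	return NOTIFY_OK;
}

static void my_cancel_work(void *data)
{
	struct my_info *info = data;

	cancel_work_sync(&info->work);
}

static int my_register(struct device *dev, struct my_info *info)
{
	int ret;

	INIT_WORK(&info->work, my_work_fn);

	/* Registered first, so it unwinds last (after the notifier) */
	ret = devm_add_action(dev, my_cancel_work, info);
	if (ret)
		return ret;

	info->nb.notifier_call = my_cable_evt;
	/* one call covers every cable id on the extcon device */
	return devm_extcon_register_notifier_all(dev, info->edev, &info->nb);
}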
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index a8dcabc32721..4cc6e038dfdd 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1,6 +1,7 @@
/*
* axp288_fuel_gauge.c - Xpower AXP288 PMIC Fuel Gauge Driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -16,6 +17,7 @@
*
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -31,6 +33,12 @@
#include <linux/seq_file.h>
#include <asm/unaligned.h>
+#define PS_STAT_VBUS_TRIGGER (1 << 0)
+#define PS_STAT_BAT_CHRG_DIR (1 << 2)
+#define PS_STAT_VBAT_ABOVE_VHOLD (1 << 3)
+#define PS_STAT_VBUS_VALID (1 << 4)
+#define PS_STAT_VBUS_PRESENT (1 << 5)
+
#define CHRG_STAT_BAT_SAFE_MODE (1 << 3)
#define CHRG_STAT_BAT_VALID (1 << 4)
#define CHRG_STAT_BAT_PRESENT (1 << 5)
@@ -100,11 +108,22 @@ enum {
WL1_IRQ,
};
+enum {
+ BAT_TEMP = 0,
+ PMIC_TEMP,
+ SYSTEM_TEMP,
+ BAT_CHRG_CURR,
+ BAT_D_CURR,
+ BAT_VOLT,
+ IIO_CHANNEL_NUM
+};
+
struct axp288_fg_info {
struct platform_device *pdev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
int irq[AXP288_FG_INTR_NUM];
+ struct iio_channel *iio_channel[IIO_CHANNEL_NUM];
struct power_supply *bat;
struct mutex lock;
int status;
@@ -199,33 +218,6 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
}
-static int pmic_read_adc_val(const char *name, int *raw_val,
- struct axp288_fg_info *info)
-{
- int ret, val = 0;
- struct iio_channel *indio_chan;
-
- indio_chan = iio_channel_get(NULL, name);
- if (IS_ERR_OR_NULL(indio_chan)) {
- ret = PTR_ERR(indio_chan);
- goto exit;
- }
- ret = iio_read_channel_raw(indio_chan, &val);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "IIO channel read error: %x, %x\n", ret, val);
- goto err_exit;
- }
-
- dev_dbg(&info->pdev->dev, "adc raw val=%x\n", val);
- *raw_val = val;
-
-err_exit:
- iio_channel_release(indio_chan);
-exit:
- return ret;
-}
-
#ifdef CONFIG_DEBUG_FS
static int fuel_gauge_debug_show(struct seq_file *s, void *data)
{
@@ -296,22 +288,22 @@ static int fuel_gauge_debug_show(struct seq_file *s, void *data)
AXP288_FG_TUNE5,
fuel_gauge_reg_readb(info, AXP288_FG_TUNE5));
- ret = pmic_read_adc_val("axp288-batt-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-batttemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-pmic-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[PMIC_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-pmictemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-system-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[SYSTEM_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-systtemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-chrgcurr : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-dchrgcur : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-battvolt : %d\n", raw_val);
@@ -351,8 +343,7 @@ static inline void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
static void fuel_gauge_get_status(struct axp288_fg_info *info)
{
- int pwr_stat, ret;
- int charge, discharge;
+ int pwr_stat, fg_res;
pwr_stat = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
if (pwr_stat < 0) {
@@ -360,36 +351,32 @@ static void fuel_gauge_get_status(struct axp288_fg_info *info)
"PWR STAT read failed:%d\n", pwr_stat);
return;
}
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC charge current read failed:%d\n", ret);
- return;
- }
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC discharge current read failed:%d\n", ret);
- return;
+
+ /* Report full if Vbus is valid and the reported capacity is 100% */
+ if (pwr_stat & PS_STAT_VBUS_VALID) {
+ fg_res = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
+ if (fg_res < 0) {
+ dev_err(&info->pdev->dev,
+ "FG RES read failed: %d\n", fg_res);
+ return;
+ }
+ if (fg_res == (FG_REP_CAP_VALID | 100)) {
+ info->status = POWER_SUPPLY_STATUS_FULL;
+ return;
+ }
}
- if (charge > 0)
+ if (pwr_stat & PS_STAT_BAT_CHRG_DIR)
info->status = POWER_SUPPLY_STATUS_CHARGING;
- else if (discharge > 0)
+ else
info->status = POWER_SUPPLY_STATUS_DISCHARGING;
- else {
- if (pwr_stat & CHRG_STAT_BAT_PRESENT)
- info->status = POWER_SUPPLY_STATUS_FULL;
- else
- info->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
}
static int fuel_gauge_get_vbatt(struct axp288_fg_info *info, int *vbatt)
{
int ret = 0, raw_val;
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret < 0)
goto vbatt_read_fail;
@@ -400,24 +387,19 @@ vbatt_read_fail:
static int fuel_gauge_get_current(struct axp288_fg_info *info, int *cur)
{
- int ret, value = 0;
- int charge, discharge;
+ int ret, discharge;
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0)
- goto current_read_fail;
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
+ /* Check the discharge current first, so only one read is needed on battery. */
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &discharge);
if (ret < 0)
- goto current_read_fail;
+ return ret;
- if (charge > 0)
- value = charge;
- else if (discharge > 0)
- value = -1 * discharge;
+ if (discharge > 0) {
+ *cur = -1 * discharge;
+ return 0;
+ }
- *cur = value;
-current_read_fail:
- return ret;
+ return iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], cur);
}
static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
@@ -698,12 +680,54 @@ intr_failed:
}
}
+/*
+ * Some devices have no battery (HDMI sticks), yet the AXP288's battery
+ * detection still reports one as present.
+ */
+static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+ {
+ /* Intel Cherry Trail Compute Stick, Windows version */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
+ },
+ },
+ {
+ /* Intel Cherry Trail Compute Stick, version without an OS */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
+ },
+ },
+ {
+ /* Meegopad T08 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by OEM."),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ },
+ {}
+};
+
static int axp288_fuel_gauge_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int i, ret = 0;
struct axp288_fg_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
+ static const char * const iio_chan_name[] = {
+ [BAT_TEMP] = "axp288-batt-temp",
+ [PMIC_TEMP] = "axp288-pmic-temp",
+ [SYSTEM_TEMP] = "axp288-system-temp",
+ [BAT_CHRG_CURR] = "axp288-chrg-curr",
+ [BAT_D_CURR] = "axp288-chrg-d-curr",
+ [BAT_VOLT] = "axp288-batt-volt",
+ };
+
+ if (dmi_check_system(axp288_fuel_gauge_blacklist))
+ return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -719,18 +743,39 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
mutex_init(&info->lock);
INIT_DELAYED_WORK(&info->status_monitor, fuel_gauge_status_monitor);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++) {
+ /*
+ * Note: we cannot use devm_iio_channel_get because x86 systems
+ * lack the device<->channel maps which iio_channel_get will
+ * try to use when passed a non-NULL device pointer.
+ */
+ info->iio_channel[i] =
+ iio_channel_get(NULL, iio_chan_name[i]);
+ if (IS_ERR(info->iio_channel[i])) {
+ ret = PTR_ERR(info->iio_channel[i]);
+ dev_dbg(&pdev->dev, "error getting iiochan %s: %d\n",
+ iio_chan_name[i], ret);
+ /* Wait for axp288_adc to load */
+ if (ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+
+ goto out_free_iio_chan;
+ }
+ }
+
ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
if (!(ret & FG_DES_CAP1_VALID)) {
dev_err(&pdev->dev, "axp288 not configured by firmware\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free_iio_chan;
}
ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
case CHRG_CCCV_CV_4100MV:
info->max_volt = 4100;
@@ -751,7 +796,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
if (IS_ERR(info->bat)) {
ret = PTR_ERR(info->bat);
dev_err(&pdev->dev, "failed to register battery: %d\n", ret);
- return ret;
+ goto out_free_iio_chan;
}
fuel_gauge_create_debugfs(info);
@@ -759,6 +804,13 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
return 0;
+
+out_free_iio_chan:
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ if (!IS_ERR_OR_NULL(info->iio_channel[i]))
+ iio_channel_release(info->iio_channel[i]);
+
+ return ret;
}
static const struct platform_device_id axp288_fg_id_table[] = {
@@ -780,6 +832,9 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
if (info->irq[i] >= 0)
free_irq(info->irq[i], info);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ iio_channel_release(info->iio_channel[i]);
+
return 0;
}
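The fuel-gauge hunks replace per-read iio_channel_get()/iio_channel_release() pairs with channels acquired once at probe time, mapping -ENODEV to -EPROBE_DEFER while the axp288_adc provider loads. A minimal sketch of that acquisition loop, assuming a hypothetical two-entry channel table:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/kernel.h>

static const char * const my_chan_names[] = { "chan-a", "chan-b" };

static int my_get_channels(struct iio_channel **chans)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(my_chan_names); i++) {
		/* NULL dev: use the global consumer map, not a DT lookup */
		chans[i] = iio_channel_get(NULL, my_chan_names[i]);
		if (IS_ERR(chans[i])) {
			ret = PTR_ERR(chans[i]);
			/* provider not bound yet: ask for a later retry */
			if (ret == -ENODEV)
				ret = -EPROBE_DEFER;
			goto err_release;
		}
	}
	return 0;

err_release:
	while (--i >= 0)
		iio_channel_release(chans[i]);
	return ret;
}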
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 35ff406aca48..b58df04d03b3 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/extcon.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
@@ -162,9 +161,6 @@ struct bq24190_dev_info {
struct device *dev;
struct power_supply *charger;
struct power_supply *battery;
- struct extcon_dev *extcon;
- struct notifier_block extcon_nb;
- struct delayed_work extcon_work;
struct delayed_work input_current_limit_work;
char model_name[I2C_NAME_SIZE];
bool initialized;
@@ -686,6 +682,16 @@ static int bq24190_register_reset(struct bq24190_dev_info *bdi)
int ret, limit = 100;
u8 v;
+ /*
+ * This prop. can be passed on device instantiation from platform code:
+ * struct property_entry pe[] =
+ * { PROPERTY_ENTRY_BOOL("disable-reset"), ... };
+ * struct i2c_board_info bi =
+ * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
+ * struct i2c_adapter ad = { ... };
+ * i2c_add_adapter(&ad);
+ * i2c_new_device(&ad, &bi);
+ */
if (device_property_read_bool(bdi->dev, "disable-reset"))
return 0;
@@ -1193,8 +1199,6 @@ static int bq24190_charger_set_property(struct power_supply *psy,
static int bq24190_charger_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- int ret;
-
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
@@ -1202,13 +1206,10 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- ret = 1;
- break;
+ return 1;
default:
- ret = 0;
+ return 0;
}
-
- return ret;
}
static void bq24190_input_current_limit_work(struct work_struct *work)
@@ -1623,75 +1624,6 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void bq24190_extcon_work(struct work_struct *work)
-{
- struct bq24190_dev_info *bdi =
- container_of(work, struct bq24190_dev_info, extcon_work.work);
- int error, iinlim = 0;
- u8 v;
-
- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
- dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- return;
- }
-
- if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_SDP) == 1)
- iinlim = 500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_CDP) == 1 ||
- extcon_get_state(bdi->extcon, EXTCON_CHG_USB_ACA) == 1)
- iinlim = 1500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_DCP) == 1)
- iinlim = 2000000;
-
- if (iinlim) {
- error = bq24190_set_field_val(bdi, BQ24190_REG_ISC,
- BQ24190_REG_ISC_IINLIM_MASK,
- BQ24190_REG_ISC_IINLIM_SHIFT,
- bq24190_isc_iinlim_values,
- ARRAY_SIZE(bq24190_isc_iinlim_values),
- iinlim);
- if (error < 0)
- dev_err(bdi->dev, "Can't set IINLIM: %d\n", error);
- }
-
- /* if no charger found and in USB host mode, set OTG 5V boost, else normal */
- if (!iinlim && extcon_get_state(bdi->extcon, EXTCON_USB_HOST) == 1)
- v = BQ24190_REG_POC_CHG_CONFIG_OTG;
- else
- v = BQ24190_REG_POC_CHG_CONFIG_CHARGE;
-
- error = bq24190_write_mask(bdi, BQ24190_REG_POC,
- BQ24190_REG_POC_CHG_CONFIG_MASK,
- BQ24190_REG_POC_CHG_CONFIG_SHIFT,
- v);
- if (error < 0)
- dev_err(bdi->dev, "Can't set CHG_CONFIG: %d\n", error);
-
- pm_runtime_mark_last_busy(bdi->dev);
- pm_runtime_put_autosuspend(bdi->dev);
-}
-
-static int bq24190_extcon_event(struct notifier_block *nb, unsigned long event,
- void *param)
-{
- struct bq24190_dev_info *bdi =
- container_of(nb, struct bq24190_dev_info, extcon_nb);
-
- /*
- * The Power-Good detection may take up to 220ms, sometimes
- * the external charger detection is quicker, and the bq24190 will
- * reset to iinlim based on its own charger detection (which is not
- * hooked up when using external charger detection) resulting in
- * a too low default 500mA iinlim. Delay applying the extcon value
- * for 300ms to avoid this.
- */
- queue_delayed_work(system_wq, &bdi->extcon_work, msecs_to_jiffies(300));
-
- return NOTIFY_OK;
-}
-
static int bq24190_hw_init(struct bq24190_dev_info *bdi)
{
u8 v;
@@ -1766,7 +1698,6 @@ static int bq24190_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct power_supply_config charger_cfg = {}, battery_cfg = {};
struct bq24190_dev_info *bdi;
- const char *name;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -1796,25 +1727,6 @@ static int bq24190_probe(struct i2c_client *client,
return -EINVAL;
}
- /*
- * Devicetree platforms should get extcon via phandle (not yet supported).
- * On ACPI platforms, extcon clients may invoke us with:
- * struct property_entry pe[] =
- * { PROPERTY_ENTRY_STRING("extcon-name", client_name), ... };
- * struct i2c_board_info bi =
- * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
- * struct i2c_adapter ad = { ... };
- * i2c_add_adapter(&ad);
- * i2c_new_device(&ad, &bi);
- */
- if (device_property_read_string(dev, "extcon-name", &name) == 0) {
- bdi->extcon = extcon_get_extcon_dev(name);
- if (!bdi->extcon)
- return -EPROBE_DEFER;
-
- dev_info(bdi->dev, "using extcon device %s\n", name);
- }
-
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 600);
@@ -1882,20 +1794,6 @@ static int bq24190_probe(struct i2c_client *client,
if (ret < 0)
goto out_sysfs;
- if (bdi->extcon) {
- INIT_DELAYED_WORK(&bdi->extcon_work, bq24190_extcon_work);
- bdi->extcon_nb.notifier_call = bq24190_extcon_event;
- ret = devm_extcon_register_notifier_all(dev, bdi->extcon,
- &bdi->extcon_nb);
- if (ret) {
- dev_err(dev, "Can't register extcon\n");
- goto out_sysfs;
- }
-
- /* Sync initial cable state */
- queue_delayed_work(system_wq, &bdi->extcon_work, 0);
- }
-
enable_irq_wake(client->irq);
pm_runtime_mark_last_busy(dev);
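The bq24190 hunks remove the driver's direct extcon handling and document that board code may pass the "disable-reset" boolean property when instantiating the I2C device. A compile-oriented sketch of such board code, expanding the comment in the diff above (my_* names are illustrative):

#include <linux/i2c.h>
#include <linux/property.h>

/* Hypothetical board code: instantiate a bq24190 with reset disabled */
static const struct property_entry my_bq24190_props[] = {
	PROPERTY_ENTRY_BOOL("disable-reset"),
	{ }
};

static struct i2c_board_info my_bq24190_board_info = {
	I2C_BOARD_INFO("bq24190", 0x6b),
	.properties = my_bq24190_props,
};

static struct i2c_client *my_instantiate(struct i2c_adapter *adap, int irq)
{
	my_bq24190_board_info.irq = irq;
	/* the driver reads "disable-reset" via device_property_read_bool() */
	return i2c_new_device(adap, &my_bq24190_board_info);
}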
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 51f0961ecf3e..d99981542a46 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -323,6 +323,30 @@ static u8
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
BQ27XXX_DM_REG_ROWS,
},
+ bq27521_regs[BQ27XXX_REG_MAX] = {
+ [BQ27XXX_REG_CTRL] = 0x02,
+ [BQ27XXX_REG_TEMP] = 0x0a,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x0c,
+ [BQ27XXX_REG_AI] = 0x0e,
+ [BQ27XXX_REG_FLAGS] = 0x08,
+ [BQ27XXX_REG_TTE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_FCC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CTRL] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CLASS] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ },
bq27530_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -557,6 +581,15 @@ static enum power_supply_property bq27520g4_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27521_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
static enum power_supply_property bq27530_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -671,6 +704,7 @@ static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
#define bq27520g2_dm_regs 0
#define bq27520g3_dm_regs 0
#define bq27520g4_dm_regs 0
+#define bq27521_dm_regs 0
#define bq27530_dm_regs 0
#define bq27531_dm_regs 0
#define bq27541_dm_regs 0
@@ -717,8 +751,8 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = {
#endif
#define BQ27XXX_O_ZERO 0x00000001
-#define BQ27XXX_O_OTDC 0x00000002
-#define BQ27XXX_O_UTOT 0x00000004
+#define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */
+#define BQ27XXX_O_UTOT 0x00000004 /* has OT overtemperature flag */
#define BQ27XXX_O_CFGUP 0x00000008
#define BQ27XXX_O_RAM 0x00000010
@@ -751,6 +785,7 @@ static struct {
[BQ27520G2] = BQ27XXX_DATA(bq27520g2, 0 , BQ27XXX_O_OTDC),
[BQ27520G3] = BQ27XXX_DATA(bq27520g3, 0 , BQ27XXX_O_OTDC),
[BQ27520G4] = BQ27XXX_DATA(bq27520g4, 0 , BQ27XXX_O_OTDC),
+ [BQ27521] = BQ27XXX_DATA(bq27521, 0 , 0),
[BQ27530] = BQ27XXX_DATA(bq27530, 0 , BQ27XXX_O_UTOT),
[BQ27531] = BQ27XXX_DATA(bq27531, 0 , BQ27XXX_O_UTOT),
[BQ27541] = BQ27XXX_DATA(bq27541, 0 , BQ27XXX_O_OTDC),
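The new bq27521 entry marks most registers INVALID_REG_ADDR, relying on the access paths to refuse features the chip lacks. A minimal sketch of such a sentinel guard, with hypothetical types (the real driver's helpers differ):

#include <linux/errno.h>
#include <linux/types.h>

#define MY_INVALID_REG_ADDR 0xff	/* assumed sentinel, as in the driver */

struct my_gauge {
	u8 regs[16];			/* per-variant register map */
	int (*raw_read)(struct my_gauge *g, u8 addr);
};

static int my_gauge_read(struct my_gauge *g, int reg_index)
{
	if (g->regs[reg_index] == MY_INVALID_REG_ADDR)
		return -EINVAL;		/* register absent on this variant */

	return g->raw_read(g, g->regs[reg_index]);
}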
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 0b11ed472f33..6b25e5f2337e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -239,6 +239,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27520g2", BQ27520G2 },
{ "bq27520g3", BQ27520G3 },
{ "bq27520g4", BQ27520G4 },
+ { "bq27521", BQ27521 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27531 },
{ "bq27541", BQ27541 },
@@ -269,6 +270,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27520g2" },
{ .compatible = "ti,bq27520g3" },
{ .compatible = "ti,bq27520g4" },
+ { .compatible = "ti,bq27521" },
{ .compatible = "ti,bq27530" },
{ .compatible = "ti,bq27531" },
{ .compatible = "ti,bq27541" },
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 6502fa7c2106..1de4b4493824 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -578,7 +578,7 @@ static int check_charging_duration(struct charger_manager *cm)
} else if (is_ext_pwr_online(cm) && !cm->charger_enabled) {
duration = curr - cm->charging_end_time;
- if (duration > desc->charging_max_duration_ms &&
+ if (duration > desc->discharging_max_duration_ms &&
is_ext_pwr_online(cm)) {
dev_info(cm->dev, "Discharging duration exceed %ums\n",
desc->discharging_max_duration_ms);
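The charger-manager one-liner fixes a copy-paste error: the discharge branch compared its elapsed time against charging_max_duration_ms. The intended symmetry, sketched with a hypothetical descriptor type:

#include <linux/types.h>

struct my_desc {
	u32 charging_max_duration_ms;
	u32 discharging_max_duration_ms;
};

/* Sketch of the intended pairing: each branch checks its own limit */
static bool my_duration_exceeded(bool charging, u64 elapsed_ms,
				 const struct my_desc *desc)
{
	if (charging)
		return elapsed_ms > desc->charging_max_duration_ms;

	return elapsed_ms > desc->discharging_max_duration_ms;
}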
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index ee71a2b37b12..98ba07869c3b 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -586,8 +586,8 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
cpcap_battery_irq_thread,
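The cpcap-battery fix reflects that platform_get_irq_byname() reports failure as a negative errno, not as 0, so the error must be propagated rather than tested with `!irq`. A self-contained sketch of the corrected pattern (my_* names and the trigger flags are illustrative):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int my_init_irq(struct platform_device *pdev, const char *name,
		       irq_handler_t thread_fn, void *data)
{
	int irq;

	irq = platform_get_irq_byname(pdev, name);
	if (irq < 0)
		return irq;	/* propagate the errno, including -EPROBE_DEFER */

	return devm_request_threaded_irq(&pdev->dev, irq, NULL, thread_fn,
					 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					 name, data);
}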
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index 08e4fd9ee607..4cfa3f0cd689 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -60,6 +60,7 @@ enum ltc294x_id {
#define LTC294X_REG_CONTROL_PRESCALER_SET(x) \
((x << 3) & LTC294X_REG_CONTROL_PRESCALER_MASK)
#define LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED 0
+#define LTC294X_REG_CONTROL_ADC_DISABLE(x) ((x) & ~(BIT(7) | BIT(6)))
struct ltc294x_info {
struct i2c_client *client; /* I2C Client pointer */
@@ -523,6 +524,29 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
return 0;
}
+static void ltc294x_i2c_shutdown(struct i2c_client *client)
+{
+ struct ltc294x_info *info = i2c_get_clientdata(client);
+ int ret;
+ u8 value;
+ u8 control;
+
+ /* The LTC2941 does not need any special handling */
+ if (info->id == LTC2941_ID)
+ return;
+
+ /* Read control register */
+ ret = ltc294x_read_regs(info->client, LTC294X_REG_CONTROL, &value, 1);
+ if (ret < 0)
+ return;
+
+ /* Disable continuous ADC conversion as this drains the battery */
+ control = LTC294X_REG_CONTROL_ADC_DISABLE(value);
+ if (control != value)
+ ltc294x_write_regs(info->client, LTC294X_REG_CONTROL,
+ &control, 1);
+}
+
#ifdef CONFIG_PM_SLEEP
static int ltc294x_suspend(struct device *dev)
@@ -589,6 +613,7 @@ static struct i2c_driver ltc294x_driver = {
},
.probe = ltc294x_i2c_probe,
.remove = ltc294x_i2c_remove,
+ .shutdown = ltc294x_i2c_shutdown,
.id_table = ltc294x_i2c_id,
};
module_i2c_driver(ltc294x_driver);
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 5b556a13f517..35dde81b1c9b 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -123,6 +123,8 @@ static int max17042_get_temperature(struct max17042_chip *chip, int *temp)
static int max17042_get_status(struct max17042_chip *chip, int *status)
{
int ret, charge_full, charge_now;
+ int avg_current;
+ u32 data;
ret = power_supply_am_i_supplied(chip->battery);
if (ret < 0) {
@@ -152,10 +154,31 @@ static int max17042_get_status(struct max17042_chip *chip, int *status)
if (ret < 0)
return ret;
- if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD)
+ if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD) {
*status = POWER_SUPPLY_STATUS_FULL;
- else
+ return 0;
+ }
+
+ /*
+ * Even though we are supplied, we may still be discharging if the
+ * supply is e.g. only delivering 5V 0.5A. Check current if available.
+ */
+ if (!chip->pdata->enable_current_sense) {
*status = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+ }
+
+ ret = regmap_read(chip->regmap, MAX17042_AvgCurrent, &data);
+ if (ret < 0)
+ return ret;
+
+ avg_current = sign_extend32(data, 15);
+ avg_current *= 1562500 / chip->pdata->r_sns;
+
+ if (avg_current > 0)
+ *status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ *status = POWER_SUPPLY_STATUS_DISCHARGING;
return 0;
}
@@ -863,16 +886,13 @@ static void max17042_init_worker(struct work_struct *work)
#ifdef CONFIG_OF
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_of_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct device_node *np = dev->of_node;
u32 prop;
struct max17042_platform_data *pdata;
- if (!np)
- return dev->platform_data;
-
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -897,7 +917,8 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
-#else
+#endif
+
static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
/*
* Some firmwares do not set FullSOCThr, Enable End-of-Charge Detection
@@ -907,15 +928,12 @@ static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
};
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_default_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct max17042_platform_data *pdata;
int ret, misc_cfg;
- if (dev->platform_data)
- return dev->platform_data;
-
/*
* The MAX17047 gets used on x86 where we might not have pdata, assume
* the firmware will already have initialized the fuel-gauge and provide
@@ -948,7 +966,21 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
+
+static struct max17042_platform_data *
+max17042_get_pdata(struct max17042_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+
+#ifdef CONFIG_OF
+ if (dev->of_node)
+ return max17042_get_of_pdata(chip);
#endif
+ if (dev->platform_data)
+ return dev->platform_data;
+
+ return max17042_get_default_pdata(chip);
+}
static const struct regmap_config max17042_regmap_config = {
.reg_bits = 8,
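The max17042 status change derives charge/discharge direction from the signed AvgCurrent register when current sensing is available. A sketch of that conversion, assuming (as the driver's own arithmetic implies) one LSB of 1.5625 uV across a sense resistor given in micro-ohms:

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * Convert the raw 16-bit AvgCurrent register to a signed current in uA.
 * With r_sns in micro-ohms the per-LSB scale is 1562500 / r_sns, i.e.
 * ~156 uA per LSB for the common 10 mOhm (10000 uOhm) sense resistor.
 */
static int my_reg_to_current_ua(u32 raw, int r_sns_uohm)
{
	int val = sign_extend32(raw, 15);	/* bit 15 is the sign bit */

	return val * (1562500 / r_sns_uohm);
}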
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index ccb4217b9638..cb6e8f66c7a2 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -183,7 +183,7 @@ static int sbsm_select(struct i2c_mux_core *muxc, u32 chan)
return ret;
/* chan goes from 1 ... 4 */
- reg = 1 << BIT(SBSM_SMB_BAT_OFFSET + chan);
+ reg = BIT(SBSM_SMB_BAT_OFFSET + chan);
ret = sbsm_write_word(data->client, SBSM_CMD_BATSYSSTATE, reg);
if (ret)
dev_err(dev, "Failed to select channel %i\n", chan);
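The sbs-manager fix removes a double shift: BIT(n) already expands to (1UL << (n)), so the old `1 << BIT(...)` computed 1 << (1 << n) and set the wrong bit. A compile-time demonstration of the difference:

#include <linux/bitops.h>
#include <linux/bug.h>

static void my_bit_demo(void)
{
	BUILD_BUG_ON(BIT(2) != 0x04);		/* intended: bit 2 set */
	BUILD_BUG_ON((1 << BIT(2)) != 0x10);	/* old code: bit 4 set */
}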
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index d1694f1def72..35636e1d8a3d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -29,6 +29,7 @@
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
+#include <linux/suspend.h>
#include <asm/iosf_mbi.h>
#include <asm/processor.h>
@@ -155,6 +156,7 @@ struct rapl_power_limit {
int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ u64 last_power_limit;
};
static const char pl1_name[] = "long_term";
@@ -1209,7 +1211,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
struct rapl_domain *rd;
char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
- int nr_pl, ret;;
+ int nr_pl, ret;
/* Update the domain data of the new package */
rapl_update_domain_data(rp);
@@ -1533,6 +1535,92 @@ static int rapl_cpu_down_prep(unsigned int cpu)
static enum cpuhp_state pcap_rapl_online;
+static void power_limit_state_save(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, ret, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT1,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT2,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static void power_limit_state_restore(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT1,
+ rd->rpl[i].last_power_limit);
+ break;
+ case PL2_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT2,
+ rd->rpl[i].last_power_limit);
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static int rapl_pm_callback(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_SUSPEND_PREPARE:
+ power_limit_state_save();
+ break;
+ case PM_POST_SUSPEND:
+ power_limit_state_restore();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_pm_notifier = {
+ .notifier_call = rapl_pm_callback,
+};
+
static int __init rapl_init(void)
{
const struct x86_cpu_id *id;
@@ -1560,8 +1648,16 @@ static int __init rapl_init(void)
/* Don't bail out if PSys is not supported */
rapl_register_psys();
+
+ ret = register_pm_notifier(&rapl_pm_notifier);
+ if (ret)
+ goto err_unreg_all;
+
return 0;
+err_unreg_all:
+ cpuhp_remove_state(pcap_rapl_online);
+
err_unreg:
rapl_unregister_powercap();
return ret;
@@ -1569,6 +1665,7 @@ err_unreg:
static void __exit rapl_exit(void)
{
+ unregister_pm_notifier(&rapl_pm_notifier);
cpuhp_remove_state(pcap_rapl_online);
rapl_unregister_powercap();
}
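The intel_rapl hunks save user-set power limits on PM_SUSPEND_PREPARE and restore them on PM_POST_SUSPEND, so that limits configured from user space survive a suspend cycle. The bare notifier skeleton, with hypothetical my_* hardware accessors in place of the MSR reads/writes:

#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/types.h>

static u64 my_saved;			/* hypothetical saved state */

static u64 my_read_hw(void)
{
	return 0;			/* hypothetical: read limit from hw */
}

static void my_write_hw(u64 v)
{
	/* hypothetical: write limit back to hw */
}

static int my_pm_callback(struct notifier_block *nb,
			  unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_SUSPEND_PREPARE:	/* before devices suspend */
		my_saved = my_read_hw();
		break;
	case PM_POST_SUSPEND:		/* after devices resume */
		my_write_hw(my_saved);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_pm_nb = {
	.notifier_call = my_pm_callback,
};

/* module init: register_pm_notifier(&my_pm_nb);
 * module exit: unregister_pm_notifier(&my_pm_nb);
 */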
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 5b10b50f8686..64b2b2501a79 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -673,15 +673,13 @@ EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
static int __init powercap_init(void)
{
- int result = 0;
+ int result;
result = seed_constraint_attributes();
if (result)
return result;
- result = class_register(&powercap_class);
-
- return result;
+ return class_register(&powercap_class);
}
device_initcall(powercap_init);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 6eb0db37dd88..1d42385b1aa5 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -49,7 +49,7 @@ static DEFINE_IDR(pps_idr);
* Char device methods
*/
-static unsigned int pps_cdev_poll(struct file *file, poll_table *wait)
+static __poll_t pps_cdev_poll(struct file *file, poll_table *wait)
{
struct pps_device *pps = file->private_data;
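This and the following poll-method changes (ptp, rio_mport_cdev) are part of the tree-wide switch to the __poll_t annotation, which lets sparse check that poll handlers only return poll event flags. A minimal sketch of an annotated handler, with a hypothetical my_dev:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_dev {
	wait_queue_head_t waitq;
	bool data_ready;
};

static __poll_t my_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;

	poll_wait(file, &dev->waitq, wait);	/* register for wakeups */

	if (dev->data_ready)
		return POLLIN | POLLRDNORM;

	return 0;
}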
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 58a97d420572..a593b4cf47bf 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -280,7 +280,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
return err;
}
-unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
+__poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index b86f1bfecd6f..c7c62b782cb9 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -90,7 +90,7 @@ int ptp_open(struct posix_clock *pc, fmode_t fmode);
ssize_t ptp_read(struct posix_clock *pc,
uint flags, char __user *buf, size_t cnt);
-uint ptp_poll(struct posix_clock *pc,
+__poll_t ptp_poll(struct posix_clock *pc,
struct file *fp, poll_table *wait);
/*
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index ec4bc1515f0d..6092b3a5978e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2319,7 +2319,7 @@ static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
+static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
struct mport_cdev_priv *priv = filp->private_data;
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index ca44e6977cf2..2d9ec378a8bc 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -327,7 +327,7 @@ int cec_add_elem(u64 pfn)
} else {
/* We have reached max count for this page, soft-offline it. */
pr_err("Soft-offlining pfn: 0x%llx\n", pfn);
- memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
+ memory_failure_queue(pfn, MF_SOFT_OFFLINE);
ca->pfns_poisoned++;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 96cd55f9e3c5..b27417ca188a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -744,6 +744,13 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
+config REGULATOR_SC2731
+ tristate "Spreadtrum SC2731 power regulator driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ This driver provides support for the voltage regulators on the
+ SC2731 PMIC.
+
config REGULATOR_SKY81452
tristate "Skyworks Solutions SKY81452 voltage regulator"
depends on MFD_SKY81452
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 80ffc57a9ca3..19fea09ba10a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b64b7916507f..42681c10cbe4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -58,8 +58,6 @@ static bool has_full_constraints;
static struct dentry *debugfs_root;
-static struct class regulator_class;
-
/*
* struct regulator_map
*
@@ -112,11 +110,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name);
static void _regulator_put(struct regulator *regulator);
-static struct regulator_dev *dev_to_rdev(struct device *dev)
-{
- return container_of(dev, struct regulator_dev, dev);
-}
-
static const char *rdev_get_name(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->name)
@@ -236,26 +229,35 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
return 0;
}
+/* return 0 if the state is valid */
+static int regulator_check_states(suspend_state_t state)
+{
+ return (state > PM_SUSPEND_MAX || state == PM_SUSPEND_TO_IDLE);
+}
+
/* Make sure we select a voltage that suits the needs of all
* regulator consumers
*/
static int regulator_check_consumers(struct regulator_dev *rdev,
- int *min_uV, int *max_uV)
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
{
struct regulator *regulator;
+ struct regulator_voltage *voltage;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
/*
* Assume consumers that didn't say anything are OK
* with anything in the constraint range.
*/
- if (!regulator->min_uV && !regulator->max_uV)
+ if (!voltage->min_uV && !voltage->max_uV)
continue;
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+ if (*max_uV > voltage->max_uV)
+ *max_uV = voltage->max_uV;
+ if (*min_uV < voltage->min_uV)
+ *min_uV = voltage->min_uV;
}
if (*min_uV > *max_uV) {
@@ -324,6 +326,24 @@ static int regulator_mode_constrain(struct regulator_dev *rdev,
return -EINVAL;
}
+static inline struct regulator_state *
+regulator_get_suspend_state(struct regulator_dev *rdev, suspend_state_t state)
+{
+ if (rdev->constraints == NULL)
+ return NULL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return &rdev->constraints->state_standby;
+ case PM_SUSPEND_MEM:
+ return &rdev->constraints->state_mem;
+ case PM_SUSPEND_MAX:
+ return &rdev->constraints->state_disk;
+ default:
+ return NULL;
+ }
+}
+
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -731,29 +751,32 @@ static int drms_uA_update(struct regulator_dev *rdev)
}
static int suspend_set_state(struct regulator_dev *rdev,
- struct regulator_state *rstate)
+ suspend_state_t state)
{
int ret = 0;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
/* If we have no suspend mode configuration don't set anything;
* only warn if the driver implements set_suspend_voltage or
* set_suspend_mode callback.
*/
- if (!rstate->enabled && !rstate->disabled) {
+ if (rstate->enabled != ENABLE_IN_SUSPEND &&
+ rstate->enabled != DISABLE_IN_SUSPEND) {
if (rdev->desc->ops->set_suspend_voltage ||
rdev->desc->ops->set_suspend_mode)
rdev_warn(rdev, "No configuration\n");
return 0;
}
- if (rstate->enabled && rstate->disabled) {
- rdev_err(rdev, "invalid configuration\n");
- return -EINVAL;
- }
-
- if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
+ if (rstate->enabled == ENABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_enable)
ret = rdev->desc->ops->set_suspend_enable(rdev);
- else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
+ else if (rstate->enabled == DISABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_disable)
ret = rdev->desc->ops->set_suspend_disable(rdev);
else /* OK if set_suspend_enable or set_suspend_disable is NULL */
ret = 0;
@@ -778,28 +801,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
return ret;
}
}
- return ret;
-}
-
-/* locks held by caller */
-static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
-{
- if (!rdev->constraints)
- return -EINVAL;
- switch (state) {
- case PM_SUSPEND_STANDBY:
- return suspend_set_state(rdev,
- &rdev->constraints->state_standby);
- case PM_SUSPEND_MEM:
- return suspend_set_state(rdev,
- &rdev->constraints->state_mem);
- case PM_SUSPEND_MAX:
- return suspend_set_state(rdev,
- &rdev->constraints->state_disk);
- default:
- return -EINVAL;
- }
+ return ret;
}
static void print_constraints(struct regulator_dev *rdev)
@@ -1068,7 +1071,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* do we need to setup our suspend state */
if (rdev->constraints->initial_state) {
- ret = suspend_prepare(rdev, rdev->constraints->initial_state);
+ ret = suspend_set_state(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
return ret;
@@ -1356,9 +1359,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
debugfs_create_u32("uA_load", 0444, regulator->debugfs,
&regulator->uA_load);
debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->min_uV);
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->max_uV);
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
debugfs_create_file("constraint_flags", 0444,
regulator->debugfs, regulator,
&constraint_flags_fops);
@@ -1417,20 +1420,6 @@ static void regulator_supply_alias(struct device **dev, const char **supply)
}
}
-static int of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-static struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
-{
- struct device *dev;
-
- dev = class_find_device(&regulator_class, NULL, np, of_node_match);
-
- return dev ? dev_to_rdev(dev) : NULL;
-}
-
static int regulator_match(struct device *dev, const void *data)
{
struct regulator_dev *r = dev_to_rdev(dev);
@@ -2468,10 +2457,9 @@ static int _regulator_is_enabled(struct regulator_dev *rdev)
return rdev->desc->ops->is_enabled(rdev);
}
-static int _regulator_list_voltage(struct regulator *regulator,
- unsigned selector, int lock)
+static int _regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector, int lock)
{
- struct regulator_dev *rdev = regulator->rdev;
const struct regulator_ops *ops = rdev->desc->ops;
int ret;
@@ -2487,7 +2475,8 @@ static int _regulator_list_voltage(struct regulator *regulator,
if (lock)
mutex_unlock(&rdev->mutex);
} else if (rdev->is_switch && rdev->supply) {
- ret = _regulator_list_voltage(rdev->supply, selector, lock);
+ ret = _regulator_list_voltage(rdev->supply->rdev,
+ selector, lock);
} else {
return -EINVAL;
}
@@ -2563,7 +2552,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
- return _regulator_list_voltage(regulator, selector, 1);
+ return _regulator_list_voltage(regulator->rdev, selector, 1);
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);
@@ -2605,8 +2594,8 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
return -EOPNOTSUPP;
- *vsel_reg = rdev->desc->vsel_reg;
- *vsel_mask = rdev->desc->vsel_mask;
+ *vsel_reg = rdev->desc->vsel_reg;
+ *vsel_mask = rdev->desc->vsel_mask;
return 0;
}
@@ -2897,10 +2886,38 @@ out:
return ret;
}
+static int _regulator_do_set_suspend_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, suspend_state_t state)
+{
+ struct regulator_state *rstate;
+ int uV, sel;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (min_uV < rstate->min_uV)
+ min_uV = rstate->min_uV;
+ if (max_uV > rstate->max_uV)
+ max_uV = rstate->max_uV;
+
+ sel = regulator_map_voltage(rdev, min_uV, max_uV);
+ if (sel < 0)
+ return sel;
+
+ uV = rdev->desc->ops->list_voltage(rdev, sel);
+ if (uV >= min_uV && uV <= max_uV)
+ rstate->uV = uV;
+
+ return 0;
+}
+
static int regulator_set_voltage_unlocked(struct regulator *regulator,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ suspend_state_t state)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[state];
int ret = 0;
int old_min_uV, old_max_uV;
int current_uV;
@@ -2911,7 +2928,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* should be a noop (some cpufreq implementations use the same
* voltage for multiple frequencies, for example).
*/
- if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+ if (voltage->min_uV == min_uV && voltage->max_uV == max_uV)
goto out;
/* If we're trying to set a range that overlaps the current voltage,
@@ -2921,8 +2938,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
current_uV = _regulator_get_voltage(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
goto out;
}
}
@@ -2940,12 +2957,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out;
/* restore original values in case of error */
- old_min_uV = regulator->min_uV;
- old_max_uV = regulator->max_uV;
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ old_min_uV = voltage->min_uV;
+ old_max_uV = voltage->max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
if (ret < 0)
goto out2;
@@ -2963,7 +2980,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out2;
}
- best_supply_uV = _regulator_list_voltage(regulator, selector, 0);
+ best_supply_uV = _regulator_list_voltage(rdev, selector, 0);
if (best_supply_uV < 0) {
ret = best_supply_uV;
goto out2;
@@ -2982,7 +2999,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (supply_change_uV > 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret) {
dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
ret);
@@ -2990,13 +3007,17 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
}
}
- ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ if (state == PM_SUSPEND_ON)
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ else
+ ret = _regulator_do_set_suspend_voltage(rdev, min_uV,
+ max_uV, state);
if (ret < 0)
goto out2;
if (supply_change_uV < 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret)
dev_warn(&rdev->dev, "Failed to decrease supply voltage: %d\n",
ret);
@@ -3007,8 +3028,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
out:
return ret;
out2:
- regulator->min_uV = old_min_uV;
- regulator->max_uV = old_max_uV;
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
return ret;
}
@@ -3037,7 +3058,8 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
regulator_lock_supply(regulator->rdev);
- ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV);
+ ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV,
+ PM_SUSPEND_ON);
regulator_unlock_supply(regulator->rdev);
@@ -3045,6 +3067,89 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
+static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
+ suspend_state_t state, bool en)
+{
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (!rstate->changeable)
+ return -EPERM;
+
+ rstate->enabled = en;
+
+ return 0;
+}
+
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return regulator_suspend_toggle(rdev, state, true);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_enable);
+
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator *regulator;
+ struct regulator_voltage *voltage;
+
+ /*
+ * If any consumer wants this regulator device kept on in
+ * suspend states, don't mark it as disabled.
+ */
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
+ if (voltage->min_uV || voltage->max_uV)
+ return 0;
+ }
+
+ return regulator_suspend_toggle(rdev, state, false);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_disable);
+
+static int _regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (rstate->min_uV == rstate->max_uV) {
+ rdev_err(rdev, "The suspend voltage can't be changed!\n");
+ return -EPERM;
+ }
+
+ return regulator_set_voltage_unlocked(regulator, min_uV, max_uV, state);
+}
+
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state)
+{
+ int ret = 0;
+
+ /* PM_SUSPEND_ON is handled by regulator_set_voltage() */
+ if (regulator_check_states(state) || state == PM_SUSPEND_ON)
+ return -EINVAL;
+
+ regulator_lock_supply(regulator->rdev);
+
+ ret = _regulator_set_suspend_voltage(regulator, min_uV,
+ max_uV, state);
+
+ regulator_unlock_supply(regulator->rdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage);
+
/**
* regulator_set_voltage_time - get raise/fall time
* @regulator: regulator source
@@ -3138,6 +3243,7 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
int regulator_sync_voltage(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON];
int ret, min_uV, max_uV;
mutex_lock(&rdev->mutex);
@@ -3149,20 +3255,20 @@ int regulator_sync_voltage(struct regulator *regulator)
}
/* This is only going to work if we've had a voltage configured. */
- if (!regulator->min_uV && !regulator->max_uV) {
+ if (!voltage->min_uV && !voltage->max_uV) {
ret = -EINVAL;
goto out;
}
- min_uV = regulator->min_uV;
- max_uV = regulator->max_uV;
+ min_uV = voltage->min_uV;
+ max_uV = voltage->max_uV;
/* This should be a paranoia check... */
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, 0);
if (ret < 0)
goto out;
@@ -3918,12 +4024,6 @@ static void regulator_dev_release(struct device *dev)
kfree(rdev);
}
-static struct class regulator_class = {
- .name = "regulator",
- .dev_release = regulator_dev_release,
- .dev_groups = regulator_dev_groups,
-};
-
static void rdev_init_debugfs(struct regulator_dev *rdev)
{
struct device *parent = rdev->dev.parent;
@@ -4174,81 +4274,86 @@ void regulator_unregister(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unregister);
-static int _regulator_suspend_prepare(struct device *dev, void *data)
+#ifdef CONFIG_SUSPEND
+static int _regulator_suspend_late(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const suspend_state_t *state = data;
+ suspend_state_t *state = data;
int ret;
mutex_lock(&rdev->mutex);
- ret = suspend_prepare(rdev, *state);
+ ret = suspend_set_state(rdev, *state);
mutex_unlock(&rdev->mutex);
return ret;
}
/**
- * regulator_suspend_prepare - prepare regulators for system wide suspend
+ * regulator_suspend_late - prepare regulators for system wide suspend
* @state: system suspend state
*
* Configure each regulator with its suspend operating parameters for state.
- * This will usually be called by machine suspend code prior to supending.
*/
-int regulator_suspend_prepare(suspend_state_t state)
+static int regulator_suspend_late(struct device *dev)
{
- /* ON is handled by regulator active state */
- if (state == PM_SUSPEND_ON)
- return -EINVAL;
+ suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend_prepare);
+ _regulator_suspend_late);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
-
-static int _regulator_suspend_finish(struct device *dev, void *data)
+static int _regulator_resume_early(struct device *dev, void *data)
{
+ int ret = 0;
struct regulator_dev *rdev = dev_to_rdev(dev);
- int ret;
+ suspend_state_t *state = data;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, *state);
+ if (rstate == NULL)
+ return -EINVAL;
mutex_lock(&rdev->mutex);
- if (rdev->use_count > 0 || rdev->constraints->always_on) {
- if (!_regulator_is_enabled(rdev)) {
- ret = _regulator_do_enable(rdev);
- if (ret)
- dev_err(dev,
- "Failed to resume regulator %d\n",
- ret);
- }
- } else {
- if (!have_full_constraints())
- goto unlock;
- if (!_regulator_is_enabled(rdev))
- goto unlock;
- ret = _regulator_do_disable(rdev);
- if (ret)
- dev_err(dev, "Failed to suspend regulator %d\n", ret);
- }
-unlock:
+ if (rdev->desc->ops->resume_early &&
+ (rstate->enabled == ENABLE_IN_SUSPEND ||
+ rstate->enabled == DISABLE_IN_SUSPEND))
+ ret = rdev->desc->ops->resume_early(rdev);
+
mutex_unlock(&rdev->mutex);
- /* Keep processing regulators in spite of any errors */
- return 0;
+ return ret;
}
-/**
- * regulator_suspend_finish - resume regulators from system wide suspend
- *
- * Turn on regulators that might be turned off by regulator_suspend_prepare
- * and that should be turned on according to the regulators properties.
- */
-int regulator_suspend_finish(void)
+static int regulator_resume_early(struct device *dev)
{
- return class_for_each_device(&regulator_class, NULL, NULL,
- _regulator_suspend_finish);
+ suspend_state_t state = pm_suspend_target_state;
+
+ return class_for_each_device(&regulator_class, NULL, &state,
+ _regulator_resume_early);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_finish);
+#else /* !CONFIG_SUSPEND */
+
+#define regulator_suspend_late NULL
+#define regulator_resume_early NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
+ .suspend_late = regulator_suspend_late,
+ .resume_early = regulator_resume_early,
+};
+#endif
+
+struct class regulator_class = {
+ .name = "regulator",
+ .dev_release = regulator_dev_release,
+ .dev_groups = regulator_dev_groups,
+#ifdef CONFIG_PM
+ .pm = &regulator_pm_ops,
+#endif
+};
/**
* regulator_has_full_constraints - the system has fully specified constraints
*
@@ -4424,8 +4529,8 @@ static void regulator_summary_show_subtree(struct seq_file *s,
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
seq_printf(s, "%37dmV %5dmV",
- consumer->min_uV / 1000,
- consumer->max_uV / 1000);
+ consumer->voltage[PM_SUSPEND_ON].min_uV / 1000,
+ consumer->voltage[PM_SUSPEND_ON].max_uV / 1000);
break;
case REGULATOR_CURRENT:
break;
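The regulator core hunks above give each consumer one voltage range per system state and expose regulator_set_suspend_voltage() alongside the suspend enable/disable helpers. A sketch of a consumer using the new call (the voltage values are illustrative only):

#include <linux/regulator/consumer.h>
#include <linux/suspend.h>

static int my_config_supply(struct regulator *reg)
{
	int ret;

	/* runtime (PM_SUSPEND_ON) range, exactly as before */
	ret = regulator_set_voltage(reg, 1800000, 1800000);
	if (ret)
		return ret;

	/* new: the range to apply while the system is in suspend-to-mem */
	return regulator_set_suspend_voltage(reg, 1200000, 1200000,
					     PM_SUSPEND_MEM);
}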
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 66a8ea0c8386..abfd56e8c78a 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -16,10 +16,25 @@
#ifndef __REGULATOR_INTERNAL_H
#define __REGULATOR_INTERNAL_H
+#include <linux/suspend.h>
+
+#define REGULATOR_STATES_NUM (PM_SUSPEND_MAX + 1)
+
+struct regulator_voltage {
+ int min_uV;
+ int max_uV;
+};
+
/*
* struct regulator
*
* One for each consumer device.
+ * @voltage - an array holding one voltage range per system state, i.e.:
+ * PM_SUSPEND_ON
+ * PM_SUSPEND_TO_IDLE
+ * PM_SUSPEND_STANDBY
+ * PM_SUSPEND_MEM
+ * PM_SUSPEND_MAX
*/
struct regulator {
struct device *dev;
@@ -27,14 +42,22 @@ struct regulator {
unsigned int always_on:1;
unsigned int bypass:1;
int uA_load;
- int min_uV;
- int max_uV;
+ struct regulator_voltage voltage[REGULATOR_STATES_NUM];
const char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
struct dentry *debugfs;
};
+extern struct class regulator_class;
+
+static inline struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct regulator_dev, dev);
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
+
#ifdef CONFIG_OF
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
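
A hypothetical accessor (not part of the patch) shows how the new per-state voltage array is meant to be indexed, building on the struct regulator and REGULATOR_STATES_NUM definitions above:

/* Hypothetical helper: look up a consumer's request for one PM state. */
static int example_get_state_voltage(struct regulator *reg,
                                     suspend_state_t state,
                                     int *min_uV, int *max_uV)
{
        if (state >= REGULATOR_STATES_NUM)
                return -EINVAL;

        *min_uV = reg->voltage[state].min_uV;
        *max_uV = reg->voltage[state].max_uV;
        return 0;
}
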
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 14637a01ba2d..092ed6efb3ec 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -177,14 +177,30 @@ static void of_get_regulation_constraints(struct device_node *np,
if (of_property_read_bool(suspend_np,
"regulator-on-in-suspend"))
- suspend_state->enabled = true;
+ suspend_state->enabled = ENABLE_IN_SUSPEND;
else if (of_property_read_bool(suspend_np,
"regulator-off-in-suspend"))
- suspend_state->disabled = true;
+ suspend_state->enabled = DISABLE_IN_SUSPEND;
+ else
+ suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
+
+	if (!of_property_read_u32(suspend_np,
+				  "regulator-suspend-min-microvolt", &pval))
+		suspend_state->min_uV = pval;
+
+	if (!of_property_read_u32(suspend_np,
+				  "regulator-suspend-max-microvolt", &pval))
+		suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
"regulator-suspend-microvolt", &pval))
suspend_state->uV = pval;
+ else /* otherwise use min_uV as default suspend voltage */
+ suspend_state->uV = suspend_state->min_uV;
+
+ if (of_property_read_bool(suspend_np,
+ "regulator-changeable-in-suspend"))
+ suspend_state->changeable = true;
if (i == PM_SUSPEND_MEM)
constraints->initial_state = PM_SUSPEND_MEM;
@@ -376,3 +392,17 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
return init_data;
}
+
+static int of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&regulator_class, NULL, np, of_node_match);
+
+ return dev ? dev_to_rdev(dev) : NULL;
+}
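
The parsing above leans on the of_property_read_u32() return convention; a minimal sketch of the idiom (function name assumed):

#include <linux/of.h>
#include <linux/regulator/machine.h>

static void example_parse_suspend_uv(struct device_node *suspend_np,
                                     struct regulator_state *state)
{
        u32 pval;

        /* of_property_read_u32() returns 0 on success, so the negated
         * call assigns only when the property exists and leaves the
         * default untouched otherwise. */
        if (!of_property_read_u32(suspend_np,
                                  "regulator-suspend-microvolt", &pval))
                state->uV = pval;
}
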
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 0241ada47d04..63c7a0c17777 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -486,24 +486,6 @@ static int spmi_vreg_update_bits(struct spmi_regulator *vreg, u16 addr, u8 val,
return regmap_update_bits(vreg->regmap, vreg->base + addr, mask, val);
}
-static int spmi_regulator_common_is_enabled(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- u8 reg;
-
- spmi_vreg_read(vreg, SPMI_COMMON_REG_ENABLE, &reg, 1);
-
- return (reg & SPMI_COMMON_ENABLE_MASK) == SPMI_COMMON_ENABLE;
-}
-
-static int spmi_regulator_common_enable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_ENABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -513,7 +495,7 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
vreg->vs_enable_time = ktime_get();
}
- return spmi_regulator_common_enable(rdev);
+ return regulator_enable_regmap(rdev);
}
static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
@@ -524,14 +506,6 @@ static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
-static int spmi_regulator_common_disable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_DISABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
int min_uV, int max_uV)
{
@@ -1062,9 +1036,9 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
}
static struct regulator_ops spmi_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1077,9 +1051,9 @@ static struct regulator_ops spmi_smps_ops = {
};
static struct regulator_ops spmi_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1094,9 +1068,9 @@ static struct regulator_ops spmi_ldo_ops = {
};
static struct regulator_ops spmi_ln_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1107,8 +1081,8 @@ static struct regulator_ops spmi_ln_ldo_ops = {
static struct regulator_ops spmi_vs_ops = {
.enable = spmi_regulator_vs_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
.set_over_current_protection = spmi_regulator_vs_ocp,
@@ -1117,9 +1091,9 @@ static struct regulator_ops spmi_vs_ops = {
};
static struct regulator_ops spmi_boost_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1128,9 +1102,9 @@ static struct regulator_ops spmi_boost_ops = {
};
static struct regulator_ops spmi_ftsmps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1143,9 +1117,9 @@ static struct regulator_ops spmi_ftsmps_ops = {
};
static struct regulator_ops spmi_ult_lo_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
@@ -1157,9 +1131,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
};
static struct regulator_ops spmi_ult_ho_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
@@ -1172,9 +1146,9 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
};
static struct regulator_ops spmi_ult_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1711,6 +1685,9 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
vreg->desc.id = -1;
vreg->desc.owner = THIS_MODULE;
vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.enable_reg = reg->base + SPMI_COMMON_REG_ENABLE;
+ vreg->desc.enable_mask = SPMI_COMMON_ENABLE_MASK;
+ vreg->desc.enable_val = SPMI_COMMON_ENABLE;
vreg->desc.name = name = reg->name;
vreg->desc.supply_name = reg->supply;
vreg->desc.of_match = reg->name;
@@ -1723,6 +1700,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = vreg;
+ config.regmap = regmap;
rdev = devm_regulator_register(dev, &vreg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n", name);
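
The conversion works because the core regmap helpers read the enable-register description straight out of the regulator_desc; a sketch with illustrative names and made-up register values:

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>

static const struct regulator_ops example_regmap_ops = {
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
        .is_enabled = regulator_is_enabled_regmap,
};

/* enable_reg/enable_mask/enable_val plus config.regmap are all the
 * helpers need; the offset and bit below are purely illustrative. */
static const struct regulator_desc example_desc = {
        .name = "example-ldo",
        .ops = &example_regmap_ops,
        .type = REGULATOR_VOLTAGE,
        .owner = THIS_MODULE,
        .enable_reg = 0x40,
        .enable_mask = BIT(7),
        .enable_val = BIT(7),
};
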
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
new file mode 100644
index 000000000000..eb2bdf060b7b
--- /dev/null
+++ b/drivers/regulator/sc2731-regulator.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * SC2731 regulator lock register
+ */
+#define SC2731_PWR_WR_PROT 0xf0c
+#define SC2731_WR_UNLOCK_VALUE 0x6e7f
+
+/*
+ * SC2731 enable register
+ */
+#define SC2731_POWER_PD_SW 0xc28
+#define SC2731_LDO_CAMA0_PD 0xcfc
+#define SC2731_LDO_CAMA1_PD 0xd04
+#define SC2731_LDO_CAMMOT_PD 0xd0c
+#define SC2731_LDO_VLDO_PD 0xd6c
+#define SC2731_LDO_EMMCCORE_PD 0xd2c
+#define SC2731_LDO_SDCORE_PD 0xd74
+#define SC2731_LDO_SDIO_PD 0xd70
+#define SC2731_LDO_WIFIPA_PD 0xd4c
+#define SC2731_LDO_USB33_PD 0xd5c
+#define SC2731_LDO_CAMD0_PD 0xd7c
+#define SC2731_LDO_CAMD1_PD 0xd84
+#define SC2731_LDO_CON_PD 0xd8c
+#define SC2731_LDO_CAMIO_PD 0xd94
+#define SC2731_LDO_SRAM_PD 0xd78
+
+/*
+ * SC2731 enable mask
+ */
+#define SC2731_DCDC_CPU0_PD_MASK BIT(4)
+#define SC2731_DCDC_CPU1_PD_MASK BIT(3)
+#define SC2731_DCDC_RF_PD_MASK BIT(11)
+#define SC2731_LDO_CAMA0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMA1_PD_MASK BIT(0)
+#define SC2731_LDO_CAMMOT_PD_MASK BIT(0)
+#define SC2731_LDO_VLDO_PD_MASK BIT(0)
+#define SC2731_LDO_EMMCCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDIO_PD_MASK BIT(0)
+#define SC2731_LDO_WIFIPA_PD_MASK BIT(0)
+#define SC2731_LDO_USB33_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD1_PD_MASK BIT(0)
+#define SC2731_LDO_CON_PD_MASK BIT(0)
+#define SC2731_LDO_CAMIO_PD_MASK BIT(0)
+#define SC2731_LDO_SRAM_PD_MASK BIT(0)
+
+/*
+ * SC2731 vsel register
+ */
+#define SC2731_DCDC_CPU0_VOL 0xc54
+#define SC2731_DCDC_CPU1_VOL 0xc64
+#define SC2731_DCDC_RF_VOL 0xcb8
+#define SC2731_LDO_CAMA0_VOL 0xd00
+#define SC2731_LDO_CAMA1_VOL 0xd08
+#define SC2731_LDO_CAMMOT_VOL 0xd10
+#define SC2731_LDO_VLDO_VOL 0xd28
+#define SC2731_LDO_EMMCCORE_VOL 0xd30
+#define SC2731_LDO_SDCORE_VOL 0xd38
+#define SC2731_LDO_SDIO_VOL 0xd40
+#define SC2731_LDO_WIFIPA_VOL 0xd50
+#define SC2731_LDO_USB33_VOL 0xd60
+#define SC2731_LDO_CAMD0_VOL 0xd80
+#define SC2731_LDO_CAMD1_VOL 0xd88
+#define SC2731_LDO_CON_VOL 0xd90
+#define SC2731_LDO_CAMIO_VOL 0xd98
+#define SC2731_LDO_SRAM_VOL		0xdb0
+
+/*
+ * SC2731 vsel register mask
+ */
+#define SC2731_DCDC_CPU0_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_CPU1_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_RF_VOL_MASK GENMASK(8, 0)
+#define SC2731_LDO_CAMA0_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMA1_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMMOT_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_VLDO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_EMMCCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDIO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_WIFIPA_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_USB33_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMD0_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMD1_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CON_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMIO_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_SRAM_VOL_MASK GENMASK(6, 0)
+
+enum sc2731_regulator_id {
+ SC2731_BUCK_CPU0,
+ SC2731_BUCK_CPU1,
+ SC2731_BUCK_RF,
+ SC2731_LDO_CAMA0,
+ SC2731_LDO_CAMA1,
+ SC2731_LDO_CAMMOT,
+ SC2731_LDO_VLDO,
+ SC2731_LDO_EMMCCORE,
+ SC2731_LDO_SDCORE,
+ SC2731_LDO_SDIO,
+ SC2731_LDO_WIFIPA,
+ SC2731_LDO_USB33,
+ SC2731_LDO_CAMD0,
+ SC2731_LDO_CAMD1,
+ SC2731_LDO_CON,
+ SC2731_LDO_CAMIO,
+ SC2731_LDO_SRAM,
+};
+
+static const struct regulator_ops sc2731_regu_linear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define SC2731_REGU_LINEAR(_id, en_reg, en_mask, vreg, vmask, \
+ vstep, vmin, vmax) { \
+ .name = #_id, \
+ .of_match = of_match_ptr(#_id), \
+ .ops = &sc2731_regu_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = SC2731_##_id, \
+ .owner = THIS_MODULE, \
+ .min_uV = vmin, \
+ .n_voltages = ((vmax) - (vmin)) / (vstep) + 1, \
+ .uV_step = vstep, \
+ .enable_is_inverted = true, \
+ .enable_val = 0, \
+ .enable_reg = en_reg, \
+ .enable_mask = en_mask, \
+ .vsel_reg = vreg, \
+ .vsel_mask = vmask, \
+}
+
+static struct regulator_desc regulators[] = {
+ SC2731_REGU_LINEAR(BUCK_CPU0, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU0_PD_MASK, SC2731_DCDC_CPU0_VOL,
+ SC2731_DCDC_CPU0_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_CPU1, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU1_PD_MASK, SC2731_DCDC_CPU1_VOL,
+ SC2731_DCDC_CPU1_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_RF, SC2731_POWER_PD_SW, SC2731_DCDC_RF_PD_MASK,
+ SC2731_DCDC_RF_VOL, SC2731_DCDC_RF_VOL_MASK,
+ 3125, 600000, 2196875),
+ SC2731_REGU_LINEAR(LDO_CAMA0, SC2731_LDO_CAMA0_PD,
+ SC2731_LDO_CAMA0_PD_MASK, SC2731_LDO_CAMA0_VOL,
+ SC2731_LDO_CAMA0_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMA1, SC2731_LDO_CAMA1_PD,
+ SC2731_LDO_CAMA1_PD_MASK, SC2731_LDO_CAMA1_VOL,
+ SC2731_LDO_CAMA1_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMMOT, SC2731_LDO_CAMMOT_PD,
+ SC2731_LDO_CAMMOT_PD_MASK, SC2731_LDO_CAMMOT_VOL,
+ SC2731_LDO_CAMMOT_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_VLDO, SC2731_LDO_VLDO_PD,
+ SC2731_LDO_VLDO_PD_MASK, SC2731_LDO_VLDO_VOL,
+ SC2731_LDO_VLDO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_EMMCCORE, SC2731_LDO_EMMCCORE_PD,
+ SC2731_LDO_EMMCCORE_PD_MASK, SC2731_LDO_EMMCCORE_VOL,
+ SC2731_LDO_EMMCCORE_VOL_MASK, 10000, 1200000,
+ 3750000),
+ SC2731_REGU_LINEAR(LDO_SDCORE, SC2731_LDO_SDCORE_PD,
+ SC2731_LDO_SDCORE_PD_MASK, SC2731_LDO_SDCORE_VOL,
+ SC2731_LDO_SDCORE_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_SDIO, SC2731_LDO_SDIO_PD,
+ SC2731_LDO_SDIO_PD_MASK, SC2731_LDO_SDIO_VOL,
+ SC2731_LDO_SDIO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_WIFIPA, SC2731_LDO_WIFIPA_PD,
+ SC2731_LDO_WIFIPA_PD_MASK, SC2731_LDO_WIFIPA_VOL,
+ SC2731_LDO_WIFIPA_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_USB33, SC2731_LDO_USB33_PD,
+ SC2731_LDO_USB33_PD_MASK, SC2731_LDO_USB33_VOL,
+ SC2731_LDO_USB33_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMD0, SC2731_LDO_CAMD0_PD,
+ SC2731_LDO_CAMD0_PD_MASK, SC2731_LDO_CAMD0_VOL,
+ SC2731_LDO_CAMD0_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMD1, SC2731_LDO_CAMD1_PD,
+ SC2731_LDO_CAMD1_PD_MASK, SC2731_LDO_CAMD1_VOL,
+ SC2731_LDO_CAMD1_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CON, SC2731_LDO_CON_PD,
+ SC2731_LDO_CON_PD_MASK, SC2731_LDO_CON_VOL,
+ SC2731_LDO_CON_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMIO, SC2731_LDO_CAMIO_PD,
+ SC2731_LDO_CAMIO_PD_MASK, SC2731_LDO_CAMIO_VOL,
+ SC2731_LDO_CAMIO_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_SRAM, SC2731_LDO_SRAM_PD,
+ SC2731_LDO_SRAM_PD_MASK, SC2731_LDO_SRAM_VOL,
+ SC2731_LDO_SRAM_VOL_MASK, 6250, 1000000, 1793750),
+};
+
+static int sc2731_regulator_unlock(struct regmap *regmap)
+{
+ return regmap_write(regmap, SC2731_PWR_WR_PROT,
+ SC2731_WR_UNLOCK_VALUE);
+}
+
+static int sc2731_regulator_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct regmap *regmap;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "failed to get regmap.\n");
+ return -ENODEV;
+ }
+
+ ret = sc2731_regulator_unlock(regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to release regulator lock\n");
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver sc2731_regulator_driver = {
+ .driver = {
+ .name = "sc27xx-regulator",
+ },
+ .probe = sc2731_regulator_probe,
+};
+
+module_platform_driver(sc2731_regulator_driver);
+
+MODULE_AUTHOR("Chen Junhui <erick.chen@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC2731 regulator driver");
+MODULE_LICENSE("GPL v2");
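
As a worked check of the SC2731_REGU_LINEAR() arithmetic, the BUCK_CPU0 entry above yields 512 selectors; a standalone sketch (function name assumed) of the mapping regulator_list_voltage_linear() performs:

#include <linux/errno.h>

/* Values taken from the BUCK_CPU0 descriptor; function is illustrative. */
static int example_list_voltage(unsigned int selector)
{
        const int min_uV = 400000;
        const int uV_step = 3125;
        const unsigned int n_voltages = (1996875 - 400000) / 3125 + 1; /* 512 */

        if (selector >= n_voltages)
                return -EINVAL;

        return min_uV + selector * uV_step; /* selector 0 -> 400000 uV */
}
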
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index bc489958fed7..1827185beacc 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -28,9 +28,6 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65218.h>
-enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
- DCDC5, DCDC6, LDO1, LS3 };
-
#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
_em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
@@ -329,6 +326,8 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65218_NUM_REGULATOR, GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
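
The added NULL check is the actual fix; an equivalent allocation using devm_kcalloc(), which additionally guards the element-count multiply, would look like this sketch (not the patch's code, helper name assumed):

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/mfd/tps65218.h>

static int example_alloc_strobes(struct device *dev, u8 **strobes)
{
        /* devm_kcalloc() zeroes the buffer and checks n * size overflow */
        *strobes = devm_kcalloc(dev, TPS65218_NUM_REGULATOR,
                                sizeof(**strobes), GFP_KERNEL);
        if (!*strobes)
                return -ENOMEM;

        return 0;
}
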
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index b01774e9fac0..e540ca362d08 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -919,12 +919,12 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_smd_send(qsept->qsch, data, len, false);
}
-static unsigned int qcom_smd_poll(struct rpmsg_endpoint *ept,
+static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
struct qcom_smd_channel *channel = qsept->qsch;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &channel->fblockread_event, wait);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index e0996fce3963..e622fcda30fa 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -256,10 +256,10 @@ free_kbuf:
return ret < 0 ? ret : len;
}
-static unsigned int rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
+static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
struct rpmsg_eptdev *eptdev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!eptdev->ept)
return POLLERR;
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index dffa3aab7178..5a081762afcc 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(rpmsg_trysendto);
*
* Returns mask representing the current state of the endpoint's send buffers
*/
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait)
{
if (WARN_ON(!ept))
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 0cf9c7e2ee83..685aa70e9cbe 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -71,7 +71,7 @@ struct rpmsg_endpoint_ops {
int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
- unsigned int (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
+ __poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
};
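
A minimal poll handler under the new __poll_t typing; the device structure and wait queue here are assumptions for the sketch:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct example_dev {
        wait_queue_head_t waitq;
        bool data_ready;
};

static __poll_t example_poll(struct file *filp, poll_table *wait)
{
        struct example_dev *edev = filp->private_data;
        __poll_t mask = 0;

        poll_wait(filp, &edev->waitq, wait);

        if (edev->data_ready)
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
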
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 215eac68ae2d..5a7b30d0773b 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -194,7 +194,7 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return ret;
}
-static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
{
struct rtc_device *rtc = file->private_data;
unsigned long data;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index a7917d473774..0c075d100252 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -661,9 +661,9 @@ static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
return effective_count;
}
-static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
+static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
struct eerbuffer *eerb;
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index bf4ab4efed73..956f662908a6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -429,7 +429,7 @@ out_copy:
return count;
}
-static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 00e7968a1d70..b42c9c479d4b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -369,7 +369,6 @@ out:
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -392,8 +391,8 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -473,11 +472,10 @@ out:
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -499,9 +497,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
schedulertime = cpu_to_le32(schedulertime % 604800);
memcpy(param->data, &schedulertime, sizeof(u32));
@@ -648,8 +645,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
TW_Command_Full *full_command_packet;
TW_Compatibility_Info *tw_compat_info;
TW_Event *event;
- struct timeval current_time;
- u32 current_time_ms;
+ ktime_t current_time;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
void __user *argp = (void __user *)arg;
@@ -840,17 +836,17 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
break;
case TW_IOCTL_GET_LOCK:
tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
- do_gettimeofday(&current_time);
- current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+ current_time = ktime_get();
- if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+ if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
+ ktime_after(current_time, tw_dev->ioctl_time)) {
tw_dev->ioctl_sem_lock = 1;
- tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+ tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
tw_ioctl->driver_command.status = 0;
tw_lock->time_remaining_msec = tw_lock->timeout_msec;
} else {
tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
- tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+ tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
}
break;
case TW_IOCTL_RELEASE_LOCK:
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index b6c208cc474f..d88cd3499bd5 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -666,7 +666,7 @@ typedef struct TAG_TW_Device_Extension {
unsigned char event_queue_wrapped;
unsigned int error_sequence_id;
int ioctl_sem_lock;
- u32 ioctl_msec;
+ ktime_t ioctl_time;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
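
The ioctl lock arithmetic above reduces to two ktime_t operations; a sketch with illustrative names:

#include <linux/ktime.h>
#include <linux/types.h>

/* Arm a deadline 'timeout_msec' from now. */
static ktime_t example_lock_arm(unsigned int timeout_msec)
{
        return ktime_add_ms(ktime_get(), timeout_msec);
}

/* True once the deadline passed; remaining time comes from ktime_ms_delta(). */
static bool example_lock_expired(ktime_t deadline)
{
        return ktime_after(ktime_get(), deadline);
}
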
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b150e131b2e7..cf9f2a09b47d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -221,7 +221,6 @@ out:
static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -240,8 +239,8 @@ static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -408,11 +407,10 @@ out:
static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -434,10 +432,9 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
- schedulertime = cpu_to_le32(schedulertime % 604800);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
+ schedulertime = cpu_to_le32(schedulertime);
memcpy(param->data, &schedulertime, sizeof(u32));
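
The week-alignment math is identical in both 3ware drivers: since the Unix epoch fell on a Thursday, subtracting three days makes the remainder hit zero at Sunday 00:00. A self-contained sketch (function name assumed):

#include <linux/math64.h>
#include <linux/timekeeping.h>

static u32 example_seconds_since_sunday(int tz_minuteswest)
{
        time64_t local_time;
        u32 since_sunday;

        local_time = ktime_get_real_seconds() - (tz_minuteswest * 60);
        /* 1970-01-01 was a Thursday; Thursday + 3 days = Sunday. */
        div_u64_rem(local_time - (3 * 86400), 7 * 86400, &since_sunday);

        return since_sunday;
}
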
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index af3e4d3f9735..e7961cbd2c55 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -42,6 +42,8 @@
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -913,8 +915,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
memset(str, ' ', sizeof(*str));
if (sup_adap_info->adapter_type_text[0]) {
- char *cp = sup_adap_info->adapter_type_text;
int c;
+ char *cp;
+ char *cname = kmemdup(sup_adap_info->adapter_type_text,
+ sizeof(sup_adap_info->adapter_type_text),
+ GFP_ATOMIC);
+ if (!cname)
+ return;
+
+ cp = cname;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
else {
@@ -923,7 +932,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
- inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
+ inqstrcpy(cname, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@@ -931,14 +940,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
- c = 0;
- if (strlen(cp) > sizeof(str->pid)) {
- c = cp[sizeof(str->pid)];
+ if (strlen(cp) > sizeof(str->pid))
cp[sizeof(str->pid)] = '\0';
- }
inqstrcpy (cp, str->pid);
- if (c)
- cp[sizeof(str->pid)] = c;
+
+ kfree(cname);
} else {
struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
@@ -1660,87 +1666,309 @@ static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
(void *) cmd);
}
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+ struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
{
- struct fib *fibptr;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- struct aac_ciss_identify_pd *identify_resp;
- dma_addr_t addr;
- u32 vbus, vid;
- u16 fibsize, datasize;
- int rcode = -ENOMEM;
-
+ struct fib *fibptr;
+ dma_addr_t addr;
+ int rcode;
+ int fibsize;
+ struct aac_srb *srb;
+ struct aac_srb_reply *srb_reply;
+ struct sgmap64 *sg64;
+ u32 vbus;
+ u32 vid;
+
+ if (!dev->sa_firmware)
+ return 0;
+ /* allocate FIB */
fibptr = aac_fib_alloc(dev);
if (!fibptr)
- goto out;
+ return -ENOMEM;
- fibsize = sizeof(struct aac_srb) -
- sizeof(struct sgentry) + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_identify_pd);
+ aac_fib_init(fibptr);
+ fibptr->hw_fib_va->header.XferState &=
+ ~cpu_to_le32(FastResponseCapable);
- identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (!identify_resp)
- goto fib_free_ptr;
+ fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ sizeof(struct sgentry64);
- vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
- vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+ /* allocate DMA buffer for response */
+ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ rcode = -ENOMEM;
+ goto fib_error;
+ }
- aac_fib_init(fibptr);
+ srb = fib_data(fibptr);
+ memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ vbus = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_bus);
+ vid = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_target);
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = 0x26;
- srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
- srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+ /* set the common request fields */
+ srb->channel = cpu_to_le32(vbus);
+ srb->id = cpu_to_le32(vid);
+ srb->lun = 0;
+ srb->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srb->timeout = 0;
+ srb->retry_limit = 0;
+ srb->cdb_size = cpu_to_le32(16);
+ srb->count = cpu_to_le32(xfer_len);
+
+ sg64 = (struct sgmap64 *)&srb->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+ sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+ sg64->sg[0].count = cpu_to_le32(xfer_len);
- sg64 = (struct sgmap64 *)&srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
- sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
- sg64->sg[0].count = cpu_to_le32(datasize);
+ /*
+	 * Copy the updated SRB back so callers can dump or reuse it if needed
+ */
+ memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+ /* issue request to the controller */
+ rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+ 1, 1, NULL, NULL);
+
+ if (rcode == -ERESTARTSYS)
+ rcode = -ERESTART;
+
+ if (unlikely(rcode < 0))
+ goto bmic_error;
+
+ srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+ memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+ dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return rcode;
+}
+
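
The helper above follows the standard streaming-DMA discipline; stripped to its skeleton (names illustrative):

#include <linux/dma-mapping.h>

static int example_dma_xfer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t addr;

        addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        /* ... hand 'addr' to the controller and wait for completion ... */

        dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);
        return 0;
}
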
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+	struct aac_ciss_identify_pd *identify_resp;
- rcode = aac_fib_send(ScsiPortCommand64,
- fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+ if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+ return;
+
+ identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+ if (identify_resp == NULL) {
+ dev->hba_map[bus][target].qd_limit = 32;
+ return;
+ }
if (identify_resp->current_queue_depth_limit <= 0 ||
- identify_resp->current_queue_depth_limit > 32)
+ identify_resp->current_queue_depth_limit > 255)
dev->hba_map[bus][target].qd_limit = 32;
else
dev->hba_map[bus][target].qd_limit =
identify_resp->current_queue_depth_limit;
+}
- dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+ struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb_unit srbu;
+ struct aac_srb *srbcmd;
+ struct aac_ciss_identify_pd *identify_reply;
- aac_fib_complete(fibptr);
+ datasize = sizeof(struct aac_ciss_identify_pd);
+ identify_reply = kmalloc(datasize, GFP_KERNEL);
+ if (!identify_reply)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = 0x26;
+ srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+ srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ *identify_resp = identify_reply;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(identify_reply);
+ goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+ kfree(dev->safw_phys_luns);
+ dev->safw_phys_luns = NULL;
+}
+
+/**
+ * aac_get_safw_ciss_luns() - fetch the physical lun list from the adapter
+ * @dev:	aac_dev structure
+ *
+ * Execute a CISS REPORT PHYSICAL LUNS and cache the response so it can
+ * later be processed into the current hba_map.
+ */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb *srbcmd;
+ struct aac_srb_unit srbu;
+ struct aac_ciss_phys_luns_resp *phys_luns;
+
+ datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+ phys_luns = kmalloc(datasize, GFP_KERNEL);
+ if (phys_luns == NULL)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+ srbcmd->cdb[1] = 2; /* extended reporting */
+ srbcmd->cdb[8] = (u8)(datasize >> 8);
+ srbcmd->cdb[9] = (u8)(datasize);
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ if (phys_luns->resp_flag != 2) {
+ rcode = -ENOMSG;
+ goto mem_free_all;
+ }
+
+ dev->safw_phys_luns = phys_luns;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(phys_luns);
+ goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
+ return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+ return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
+
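
aac_get_safw_phys_lun_count() relies on the unaligned big-endian accessor pulled in at the top of the file; the idiom in isolation (24 being the size of one CISS lun record):

#include <asm/unaligned.h>

/* Sketch: the lun list length is a 4-byte big-endian byte count. */
static u32 example_lun_count(const u8 list_length[4])
{
        return get_unaligned_be32(list_length) / 24;
}
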
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+ int bus, int target)
+{
+ kfree(dev->hba_map[bus][target].safw_identify_resp);
+ dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+ int lun_count)
+{
+ int luns;
+ int i;
+ u32 bus;
+ u32 target;
+
+ luns = aac_get_safw_phys_lun_count(dev);
+
+ if (luns < lun_count)
+ lun_count = luns;
+ else if (lun_count < 0)
+ lun_count = luns;
+
+ for (i = 0; i < lun_count; i++) {
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ aac_free_safw_identify_resp(dev, bus, target);
+ }
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+ int i;
+ int rcode = 0;
+ u32 lun_count;
+ u32 bus;
+ u32 target;
+ struct aac_ciss_identify_pd *identify_resp = NULL;
+
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ for (i = 0; i < lun_count; ++i) {
+
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ rcode = aac_issue_safw_bmic_identify(dev,
+ &identify_resp, bus, target);
+
+ if (unlikely(rcode < 0))
+ goto free_identify_resp;
+
+ dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+ }
-fib_free_ptr:
- aac_fib_free(fibptr);
out:
return rcode;
+free_identify_resp:
+ aac_free_safw_all_identify_resp(dev, i);
+ goto out;
}
/**
- * aac_update hba_map()- update current hba map with data from FW
+ * aac_set_safw_attr_all_targets - update current hba map with data from FW
 * @dev: aac_dev structure
- * @phys_luns: FW information from report phys luns
*
* Update our hba map with the information gathered from the FW
*/
-void aac_update_hba_map(struct aac_dev *dev,
- struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
{
/* ok and extended reporting */
u32 lun_count, nexus;
@@ -1748,24 +1976,21 @@ void aac_update_hba_map(struct aac_dev *dev,
u8 expose_flag, attribs;
u8 devtype;
- lun_count = ((phys_luns->list_length[0] << 24)
- + (phys_luns->list_length[1] << 16)
- + (phys_luns->list_length[2] << 8)
- + (phys_luns->list_length[3])) / 24;
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ dev->scan_counter++;
for (i = 0; i < lun_count; ++i) {
- bus = phys_luns->lun[i].level2[1] & 0x3f;
- target = phys_luns->lun[i].level2[0];
- expose_flag = phys_luns->lun[i].bus >> 6;
- attribs = phys_luns->lun[i].node_ident[9];
- nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+ expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+ attribs = aac_get_safw_phys_attribs(dev, i);
+ nexus = aac_get_safw_phys_nexus(dev, i);
if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
continue;
- dev->hba_map[bus][target].expose = expose_flag;
-
if (expose_flag != 0) {
devtype = AAC_DEVTYPE_RAID_MEMBER;
goto update_devtype;
@@ -1778,95 +2003,45 @@ void aac_update_hba_map(struct aac_dev *dev,
} else
devtype = AAC_DEVTYPE_ARC_RAW;
- if (devtype != AAC_DEVTYPE_NATIVE_RAW)
- goto update_devtype;
+ dev->hba_map[bus][target].scan_counter = dev->scan_counter;
- if (aac_issue_bmic_identify(dev, bus, target) < 0)
- dev->hba_map[bus][target].qd_limit = 32;
+ aac_set_safw_target_qd(dev, bus, target);
update_devtype:
- if (rescan == AAC_INIT)
- dev->hba_map[bus][target].devtype = devtype;
- else
- dev->hba_map[bus][target].new_devtype = devtype;
+ dev->hba_map[bus][target].devtype = devtype;
}
}
-/**
- * aac_report_phys_luns() Process topology change
- * @dev: aac_dev structure
- * @fibptr: fib pointer
- *
- * Execute a CISS REPORT PHYS LUNS and process the results into
- * the current hba_map.
- */
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+static int aac_setup_safw_targets(struct aac_dev *dev)
{
- int fibsize, datasize;
- struct aac_ciss_phys_luns_resp *phys_luns;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- dma_addr_t addr;
- u32 vbus, vid;
int rcode = 0;
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
- + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_phys_luns_resp)
- + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
-
- phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (phys_luns == NULL) {
- rcode = -ENOMEM;
- goto err_out;
- }
-
- vbus = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_bus);
- vid = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_target);
+ rcode = aac_get_containers(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- aac_fib_init(fibptr);
+ rcode = aac_get_safw_ciss_luns(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ rcode = aac_get_safw_attr_all_targets(dev);
+ if (unlikely(rcode < 0))
+ goto free_ciss_luns;
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
- srbcmd->cdb[1] = 2; /* extended reporting */
- srbcmd->cdb[8] = (u8)(datasize >> 8);
- srbcmd->cdb[9] = (u8)(datasize);
-
- sg64 = (struct sgmap64 *) &srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
- sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
- sg64->sg[0].count = cpu_to_le32(datasize);
-
- rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
- FsaNormal, 1, 1, NULL, NULL);
-
- /* analyse data */
- if (rcode >= 0 && phys_luns->resp_flag == 2) {
- /* ok and extended reporting */
- aac_update_hba_map(dev, phys_luns, rescan);
- }
+ aac_set_safw_attr_all_targets(dev);
- dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
-err_out:
+ aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+ aac_free_safw_ciss_luns(dev);
+out:
return rcode;
}
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+ return aac_setup_safw_targets(dev);
+}
+
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
@@ -1969,12 +2144,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
}
- if (!dev->sync_mode && dev->sa_firmware &&
- dev->supplement_adapter_info.virt_device_bus != 0xffff) {
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
- }
-
if (!dev->in_reset) {
char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -2739,14 +2908,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
} else { /* check for physical non-dasd devices */
bus = aac_logical_to_phys(scmd_channel(scsicmd));
- if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
- (dev->hba_map[bus][cid].expose
- == AAC_HIDE_DISK)){
- if (scsicmd->cmnd[0] == INQUIRY) {
- scsicmd->result = DID_NO_CONNECT << 16;
- goto scsi_done_ret;
- }
- }
if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
dev->hba_map[bus][cid].devtype
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d52265416da2..0095fcbd1c88 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <scsi/scsi_host.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -97,7 +98,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50834
+# define AAC_DRIVER_BUILD 50877
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -117,9 +118,13 @@ enum {
/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
#define AAC_MAX_BUSES 5
#define AAC_MAX_TARGETS 256
+#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define get_bus_number(x)	((x) / AAC_MAX_TARGETS)
+#define get_target_number(x)	((x) % AAC_MAX_TARGETS)
+
/* Thor AIF events */
#define SA_AIF_HOTPLUG (1<<1)
#define SA_AIF_HARDWARE (1<<2)
@@ -1334,17 +1339,17 @@ struct fib {
#define AAC_DEVTYPE_RAID_MEMBER 1
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_EXPOSE_DISK 0
-#define AAC_HIDE_DISK 3
+
+#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
u8 devtype; /* device type */
- u8 new_devtype;
u8 reset_state; /* 0 - no reset, 1..x - */
/* after xth TM LUN reset */
u16 qd_limit;
- u8 expose; /*checks if to expose or not*/
+ u32 scan_counter;
+ struct aac_ciss_identify_pd *safw_identify_resp;
};
/*
@@ -1560,6 +1565,7 @@ struct aac_dev
spinlock_t fib_lock;
struct mutex ioctl_mutex;
+ struct mutex scan_mutex;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
@@ -1605,6 +1611,7 @@ struct aac_dev
int maximum_num_channels;
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
+ struct delayed_work safw_rescan_work;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1668,9 +1675,11 @@ struct aac_dev
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
atomic_t msix_counter;
+ u32 scan_counter;
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+ struct aac_ciss_phys_luns_resp *safw_phys_luns;
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
@@ -2023,6 +2032,12 @@ struct aac_srb_reply
__le32 sense_data_size;
u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
};
+
+struct aac_srb_unit {
+ struct aac_srb srb;
+ struct aac_srb_reply srb_reply;
+};
+
/*
* SRB Flags
*/
@@ -2627,16 +2642,41 @@ static inline int aac_adapter_check_health(struct aac_dev *dev)
return (dev)->a_ops.adapter_check_health(dev);
}
+
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, safw_rescan_work);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+
+ aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+ if (dev->sa_firmware)
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
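
For completeness, the one piece not visible in this hunk is binding the worker to the work item; a sketch of the probe-time setup this implies (placement and function name are assumptions):

#include <linux/workqueue.h>

static void example_init_rescan(struct aac_dev *dev)
{
        /* Bind the handler once; later scheduling and cancellation go
         * through the aac_schedule/aac_cancel helpers above. */
        INIT_DELAYED_WORK(&dev->safw_rescan_work, aac_safw_rescan_worker);
}
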
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
#define AAC_OWNER_ERROR_HANDLER 0x103
#define AAC_OWNER_FIRMWARE 0x106
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
+int aac_setup_safw_adapter(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9ab0fa959d83..a2b3430072c7 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1052,9 +1052,13 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
return -EFAULT;
+ dev->adapter_shutdown = 1;
+
+ mutex_unlock(&dev->ioctl_mutex);
retval = aac_reset_adapter(dev, 0, reset.reset_type);
- return retval;
+ mutex_lock(&dev->ioctl_mutex);
+ return retval;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1bc623ad3faf..0dc7b5a4fea2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -42,6 +42,8 @@
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
#include "aacraid.h"
@@ -284,6 +286,38 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
+static void aac_wait_for_io_completion(struct aac_dev *aac)
+{
+ unsigned long flagv = 0;
+ int i = 0;
+
+ for (i = 60; i; --i) {
+ struct scsi_device *dev;
+ struct scsi_cmnd *command;
+ int active = 0;
+
+ __shost_for_each_device(dev, aac->scsi_host_ptr) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+ /*
+	 * We can exit if all the commands are complete
+ */
+ if (active == 0)
+ break;
+ ssleep(1);
+ }
+}
+
/**
* aac_send_shutdown - shutdown an adapter
* @dev: Adapter to shutdown
@@ -295,12 +329,10 @@ int aac_send_shutdown(struct aac_dev * dev)
{
struct fib * fibctx;
struct aac_close *cmd;
- int status;
+ int status = 0;
- fibctx = aac_fib_alloc(dev);
- if (!fibctx)
- return -ENOMEM;
- aac_fib_init(fibctx);
+ if (aac_adapter_check_health(dev))
+ return status;
if (!dev->adapter_shutdown) {
mutex_lock(&dev->ioctl_mutex);
@@ -308,6 +340,13 @@ int aac_send_shutdown(struct aac_dev * dev)
mutex_unlock(&dev->ioctl_mutex);
}
+ aac_wait_for_io_completion(dev);
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return -ENOMEM;
+ aac_fib_init(fibctx);
+
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xfffffffe);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 80a8cb26cdea..84858d5c8257 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -1629,28 +1630,28 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
command->scsi_done(command);
}
/*
- * Any Device that was already marked offline needs to be cleaned up
+ * Any Device that was already marked offline needs to be marked
+ * running
*/
__shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev)) {
- sdev_printk(KERN_INFO, dev, "Removing offline device\n");
- scsi_remove_device(dev);
- scsi_device_put(dev);
- }
+ if (!scsi_device_online(dev))
+ scsi_device_set_state(dev, SDEV_RUNNING);
}
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
+
/*
* Issue bus rescan to catch any configuration that might have
* occurred
*/
- if (!retval) {
- dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
- scsi_scan_host(host);
+ if (!retval && !is_kdump_kernel()) {
+ dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
+ aac_schedule_safw_scan_worker(aac);
}
+
if (jafo) {
spin_lock_irq(host->host_lock);
}
@@ -1681,31 +1682,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
- if (forced < 2) for (retval = 60; retval; --retval) {
- struct scsi_device * dev;
- struct scsi_cmnd * command;
- int active = 0;
-
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
-
- }
- /*
- * We can exit If all the commands are complete
- */
- if (active == 0)
- break;
- ssleep(1);
- }
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1874,42 +1850,124 @@ out:
return BlinkLED;
}
+static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
+{
+ return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
+}
+
+static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
+ int bus,
+ int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static void aac_put_safw_scsi_device(struct scsi_device *sdev)
+{
+ if (sdev)
+ scsi_device_put(sdev);
+}
-static void aac_resolve_luns(struct aac_dev *dev)
+static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
- int bus, target, channel;
struct scsi_device *sdev;
- u8 devtype;
- u8 new_devtype;
- for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
- for (target = 0; target < AAC_MAX_TARGETS; target++) {
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ scsi_remove_device(sdev);
+ aac_put_safw_scsi_device(sdev);
+}
- if (bus == CONTAINER_CHANNEL)
- channel = CONTAINER_CHANNEL;
- else
- channel = aac_phys_to_logical(bus);
+static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
+ int bus, int target)
+{
+ return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
+}
- devtype = dev->hba_map[bus][target].devtype;
- new_devtype = dev->hba_map[bus][target].new_devtype;
+static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
+{
+ if (is_safw_raid_volume(dev, bus, target))
+ return dev->fsa_dev[target].valid;
+ else
+ return aac_is_safw_scan_count_equal(dev, bus, target);
+}
- sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
- target, 0);
+static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
+{
+ int is_exposed = 0;
+ struct scsi_device *sdev;
- if (!sdev && new_devtype)
- scsi_add_device(dev->scsi_host_ptr, channel,
- target, 0);
- else if (sdev && new_devtype != devtype)
- scsi_remove_device(sdev);
- else if (sdev && new_devtype == devtype)
- scsi_rescan_device(&sdev->sdev_gendev);
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ if (sdev)
+ is_exposed = 1;
+ aac_put_safw_scsi_device(sdev);
- if (sdev)
- scsi_device_put(sdev);
+ return is_exposed;
+}
- dev->hba_map[bus][target].devtype = new_devtype;
- }
+static int aac_update_safw_host_devices(struct aac_dev *dev)
+{
+ int i;
+ int bus;
+ int target;
+ int is_exposed = 0;
+ int rcode = 0;
+
+ rcode = aac_setup_safw_adapter(dev);
+ if (unlikely(rcode < 0)) {
+ goto out;
}
+
+ for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
+
+ bus = get_bus_number(i);
+ target = get_target_number(i);
+
+ is_exposed = aac_is_safw_device_exposed(dev, bus, target);
+
+ if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
+ aac_add_safw_device(dev, bus, target);
+ else if (!aac_is_safw_target_valid(dev, bus, target) &&
+ is_exposed)
+ aac_remove_safw_device(dev, bus, target);
+ }
+out:
+ return rcode;
+}
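/*
 * Illustrative sketch (editorial, not part of this patch): the flat loop
 * above leans on get_bus_number()/get_target_number(), whose definitions
 * fall outside this hunk. Assuming AAC_BUS_TARGET_LOOP equals
 * AAC_MAX_BUSES * AAC_MAX_TARGETS, helpers consistent with that loop
 * could look like:
 */
static inline int get_bus_number(int i)
{
	return i / AAC_MAX_TARGETS;	/* row: bus index */
}

static inline int get_target_number(int i)
{
	return i % AAC_MAX_TARGETS;	/* column: target within that bus */
}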
+
+static int aac_scan_safw_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ rcode = aac_update_safw_host_devices(dev);
+ if (rcode)
+ aac_schedule_safw_scan_worker(dev);
+
+ return rcode;
+}
+
+int aac_scan_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ mutex_lock(&dev->scan_mutex);
+ if (dev->sa_firmware)
+ rcode = aac_scan_safw_host(dev);
+ else
+ scsi_scan_host(dev->scsi_host_ptr);
+ mutex_unlock(&dev->scan_mutex);
+
+ return rcode;
}
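/*
 * Illustrative sketch (editorial, not part of this patch): the retry path
 * in aac_scan_safw_host() defers to aac_schedule_safw_scan_worker(), and
 * aac_probe_one() below wires safw_rescan_work to aac_safw_rescan_worker();
 * both are defined elsewhere in this series. A minimal worker consistent
 * with that hookup would wait out error recovery and re-enter the scan path:
 */
static void aac_safw_rescan_worker(struct work_struct *work)
{
	struct aac_dev *dev = container_of(to_delayed_work(work),
					   struct aac_dev, safw_rescan_work);

	/* don't rescan while the host is mid error recovery */
	wait_event(dev->scsi_host_ptr->host_wait,
		   !scsi_host_in_recovery(dev->scsi_host_ptr));
	aac_scan_host(dev);
}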
/**
@@ -1922,10 +1980,8 @@ static void aac_resolve_luns(struct aac_dev *dev)
*/
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
- int i, bus, target, container, rcode = 0;
+ int i;
u32 events = 0;
- struct fib *fib;
- struct scsi_device *sdev;
if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
events = SA_AIF_HOTPLUG;
@@ -1947,44 +2003,8 @@ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
case SA_AIF_LDEV_CHANGE:
case SA_AIF_BPCFG_CHANGE:
- fib = aac_fib_alloc(dev);
- if (!fib) {
- pr_err("aac_handle_sa_aif: out of memory\n");
- return;
- }
- for (bus = 0; bus < AAC_MAX_BUSES; bus++)
- for (target = 0; target < AAC_MAX_TARGETS; target++)
- dev->hba_map[bus][target].new_devtype = 0;
-
- rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
-
- if (rcode != -ERESTARTSYS)
- aac_fib_free(fib);
-
- aac_resolve_luns(dev);
-
- if (events == SA_AIF_LDEV_CHANGE ||
- events == SA_AIF_BPCFG_CHANGE) {
- aac_get_containers(dev);
- for (container = 0; container <
- dev->maximum_num_containers; ++container) {
- sdev = scsi_device_lookup(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- if (dev->fsa_dev[container].valid && !sdev) {
- scsi_add_device(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- } else if (!dev->fsa_dev[container].valid &&
- sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
- scsi_device_put(sdev);
- }
- }
- }
+ aac_scan_host(dev);
+
break;
case SA_AIF_BPSTAT_CHANGE:
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d55332de08f9..b3b931ab77eb 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -683,6 +683,9 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
u32 bus, cid;
int ret = FAILED;
+ if (aac_adapter_check_health(aac))
+ return ret;
+
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
@@ -690,7 +693,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
struct aac_hba_tm_req *tmf;
int status;
u64 address;
- __le32 managed_request_id;
pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
AAC_DRIVERNAME,
@@ -703,8 +705,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
(fib->callback_data == cmd)) {
found = 1;
- managed_request_id = ((struct aac_hba_cmd_req *)
- fib->hw_fib_va)->request_id;
break;
}
}
@@ -1375,18 +1375,15 @@ static ssize_t aac_store_reset_adapter(struct device *device,
const char *buf, size_t count)
{
int retval = -EACCES;
- int bled = 0;
- struct aac_dev *aac;
-
if (!capable(CAP_SYS_ADMIN))
return retval;
- aac = (struct aac_dev *)class_to_shost(device)->hostdata;
- bled = buf[0] == '!' ? 1:0;
- retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
+ retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
+ buf[0] == '!', IOP_HWSOFT_RESET);
if (retval >= 0)
retval = count;
+
return retval;
}
@@ -1689,6 +1686,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
+ mutex_init(&aac->scan_mutex);
+
+ INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1792,7 +1792,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_deinit;
- scsi_scan_host(shost);
+
+ aac_scan_host(aac);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -1877,6 +1878,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
+ aac_cancel_safw_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1935,6 +1937,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ aac_cancel_safw_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1992,6 +1995,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
+ aac_cancel_safw_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
@@ -2076,7 +2080,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
scsi_unblock_requests(aac->scsi_host_ptr);
- scsi_scan_host(aac->scsi_host_ptr);
+ aac_scan_host(aac);
pci_save_state(pdev);
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 553922fed524..882f40353b96 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -329,6 +329,22 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;
+ /*
+ * Fill in the function dispatch table.
+ */
+
+ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+ dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_start = aac_sa_start_adapter;
+ dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+
if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
@@ -363,22 +379,6 @@ int aac_sa_init(struct aac_dev *dev)
}
/*
- * Fill in the function dispatch table.
- */
-
- dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
- dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
- dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
- dev->a_ops.adapter_notify = aac_sa_notify_adapter;
- dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
- dev->a_ops.adapter_check_health = aac_sa_check_health;
- dev->a_ops.adapter_restart = aac_sa_restart_adapter;
- dev->a_ops.adapter_start = aac_sa_start_adapter;
- dev->a_ops.adapter_intr = aac_sa_intr;
- dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
- dev->a_ops.adapter_ioremap = aac_sa_ioremap;
-
- /*
* First clear out all interrupts. Then enable the one's that
* we can handle.
*/
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a254b32eba39..f375f3557c18 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,52 +45,57 @@
#include <linux/interrupt.h>
struct device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
-#ifdef CONFIG_XEN
- #define ARCMSR_MAX_FREECCB_NUM 160
-#define ARCMSR_MAX_OUTSTANDING_CMD 155
-#else
- #define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_MAX_OUTSTANDING_CMD 255
-#endif
-#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126"
-#define ARCMSR_SCSI_INITIATOR_ID 255
-#define ARCMSR_MAX_XFER_SECTORS 512
-#define ARCMSR_MAX_XFER_SECTORS_B 4096
-#define ARCMSR_MAX_XFER_SECTORS_C 304
-#define ARCMSR_MAX_TARGETID 17
-#define ARCMSR_MAX_TARGETLUN 8
-#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
-#define ARCMSR_MAX_QBUFFER 4096
-#define ARCMSR_DEFAULT_SG_ENTRIES 38
-#define ARCMSR_MAX_HBB_POSTQUEUE 264
+#define ARCMSR_MAX_FREECCB_NUM 1024
+#define ARCMSR_MAX_OUTSTANDING_CMD 1024
+#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
+#define ARCMSR_MIN_OUTSTANDING_CMD 32
+#define ARCMSR_DRIVER_VERSION "v1.40.00.04-20171130"
+#define ARCMSR_SCSI_INITIATOR_ID 255
+#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
+#define ARCMSR_MAX_XFER_SECTORS_C 304
+#define ARCMSR_MAX_TARGETID 17
+#define ARCMSR_MAX_TARGETLUN 8
+#define ARCMSR_MAX_CMD_PERLUN 128
+#define ARCMSR_DEFAULT_CMD_PERLUN 32
+#define ARCMSR_MIN_CMD_PERLUN 1
+#define ARCMSR_MAX_QBUFFER 4096
+#define ARCMSR_DEFAULT_SG_ENTRIES 38
+#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
-#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
-#define ARCMSR_CDB_SG_PAGE_LENGTH 256
+#define ARCMSR_MAX_HBE_DONEQUEUE 512
+#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
+#define ARCMSR_CDB_SG_PAGE_LENGTH 256
#define ARCMST_NUM_MSIX_VECTORS 4
#ifndef PCI_DEVICE_ID_ARECA_1880
-#define PCI_DEVICE_ID_ARECA_1880 0x1880
- #endif
+#define PCI_DEVICE_ID_ARECA_1880 0x1880
+#endif
#ifndef PCI_DEVICE_ID_ARECA_1214
- #define PCI_DEVICE_ID_ARECA_1214 0x1214
+#define PCI_DEVICE_ID_ARECA_1214 0x1214
#endif
#ifndef PCI_DEVICE_ID_ARECA_1203
- #define PCI_DEVICE_ID_ARECA_1203 0x1203
+#define PCI_DEVICE_ID_ARECA_1203 0x1203
#endif
+#ifndef PCI_DEVICE_ID_ARECA_1884
+#define PCI_DEVICE_ID_ARECA_1884 0x1884
+#endif
+#define ARCMSR_HOURS (1000 * 60 * 60 * 4)
+#define ARCMSR_MINUTES (1000 * 60 * 60)
/*
**********************************************************************************
**
**********************************************************************************
*/
-#define ARC_SUCCESS 0
-#define ARC_FAILURE 1
+#define ARC_SUCCESS 0
+#define ARC_FAILURE 1
/*
*******************************************************************************
** split 64bits dma addressing
*******************************************************************************
*/
-#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
-#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
+#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
+#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
/*
*******************************************************************************
** MESSAGE CONTROL CODE
@@ -130,7 +135,7 @@ struct CMD_MESSAGE_FIELD
#define FUNCTION_SAY_HELLO 0x0807
#define FUNCTION_SAY_GOODBYE 0x0808
#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
-#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
+#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
#define FUNCTION_HARDWARE_RESET 0x080B
/* ARECA IO CONTROL CODE*/
#define ARCMSR_MESSAGE_READ_RQBUFFER \
@@ -161,18 +166,18 @@ struct CMD_MESSAGE_FIELD
** structure for holding DMA address data
*************************************************************
*/
-#define IS_DMA64 (sizeof(dma_addr_t) == 8)
-#define IS_SG64_ADDR 0x01000000 /* bit24 */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+#define IS_SG64_ADDR 0x01000000 /* bit24 */
struct SG32ENTRY
{
- __le32 length;
- __le32 address;
+ __le32 length;
+ __le32 address;
}__attribute__ ((packed));
struct SG64ENTRY
{
- __le32 length;
- __le32 address;
- __le32 addresshigh;
+ __le32 length;
+ __le32 address;
+ __le32 addresshigh;
}__attribute__ ((packed));
/*
********************************************************************
@@ -191,50 +196,50 @@ struct QBUFFER
*/
struct FIRMWARE_INFO
{
- uint32_t signature; /*0, 00-03*/
- uint32_t request_len; /*1, 04-07*/
- uint32_t numbers_queue; /*2, 08-11*/
- uint32_t sdram_size; /*3, 12-15*/
- uint32_t ide_channels; /*4, 16-19*/
- char vendor[40]; /*5, 20-59*/
- char model[8]; /*15, 60-67*/
- char firmware_ver[16]; /*17, 68-83*/
- char device_map[16]; /*21, 84-99*/
- uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
- uint8_t cfgSerial[16]; /*26,104-119*/
- uint32_t cfgPicStatus; /*30,120-123*/
+ uint32_t signature; /*0, 00-03*/
+ uint32_t request_len; /*1, 04-07*/
+ uint32_t numbers_queue; /*2, 08-11*/
+ uint32_t sdram_size; /*3, 12-15*/
+ uint32_t ide_channels; /*4, 16-19*/
+ char vendor[40]; /*5, 20-59*/
+ char model[8]; /*15, 60-67*/
+ char firmware_ver[16]; /*17, 68-83*/
+ char device_map[16]; /*21, 84-99*/
+ uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
+ uint8_t cfgSerial[16]; /*26,104-119*/
+ uint32_t cfgPicStatus; /*30,120-123*/
};
/* signature of set and get firmware config */
-#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
-#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
+#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
+#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
/* message code of inbound message register */
-#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
-#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
-#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
-#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
-#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
-#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
-#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
-#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
-#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
+#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
+#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
+#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
+#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
+#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
+#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
+#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
+#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
+#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
/* doorbell interrupt generator */
-#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
-#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
-#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
-#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
+#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
+#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
+#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
+#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
/* ccb areca cdb flag */
-#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
-#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
+#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
+#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
/* outbound firmware ok */
-#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
+#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
/* ARC-1680 Bus Reset*/
-#define ARCMSR_ARC1680_BUS_RESET 0x00000003
+#define ARCMSR_ARC1680_BUS_RESET 0x00000003
/* ARC-1880 Bus Reset*/
-#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
-#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
+#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
+#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
/*
************************************************************************
@@ -277,9 +282,10 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
+#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
-#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@@ -288,7 +294,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
-#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
+#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
@@ -313,12 +319,12 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
/* Host Interrupt Status */
-#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
+#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
/*
** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
*/
-#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
+#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
/*
** Set if Outbound Doorbell register bits 30:1 have a non-zero
** value. This bit clears only when Outbound Doorbell bits
@@ -331,7 +337,7 @@ struct FIRMWARE_INFO
** Register (FIFO) is not empty. It clears when the Outbound
** Post List FIFO is empty.
*/
-#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
+#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
/*
** This bit indicates a SAS interrupt from a source external to
** the PCIe core. This bit is not maskable.
@@ -340,17 +346,17 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
/*inbound message 0 ready*/
-#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
/*more than 12 request completed in a time*/
#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
/*outbound DATA WRITE isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
+#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
/*outbound DATA READ isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
+#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
/*outbound message 0 ready*/
-#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
/*outbound message cmd isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
@@ -407,18 +413,43 @@ struct FIRMWARE_INFO
#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
/*
*******************************************************************************
+** SPEC. for Areca Type E adapter
+*******************************************************************************
+*/
+#define ARCMSR_SIGNATURE_1884 0x188417D3
+
+#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000
+
+#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
+#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
+#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009
+
+/* ARC-1884 doorbell sync */
+#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
+#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
+#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
+/*
+*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
*******************************************************************************
*/
struct ARCMSR_CDB
{
- uint8_t Bus;
- uint8_t TargetID;
- uint8_t LUN;
- uint8_t Function;
- uint8_t CdbLength;
- uint8_t sgcount;
- uint8_t Flags;
+ uint8_t Bus;
+ uint8_t TargetID;
+ uint8_t LUN;
+ uint8_t Function;
+ uint8_t CdbLength;
+ uint8_t sgcount;
+ uint8_t Flags;
#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
#define ARCMSR_CDB_FLAG_BIOS 0x02
#define ARCMSR_CDB_FLAG_WRITE 0x04
@@ -426,21 +457,21 @@ struct ARCMSR_CDB
#define ARCMSR_CDB_FLAG_HEADQ 0x08
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
- uint8_t msgPages;
- uint32_t msgContext;
- uint32_t DataLength;
- uint8_t Cdb[16];
- uint8_t DeviceStatus;
+ uint8_t msgPages;
+ uint32_t msgContext;
+ uint32_t DataLength;
+ uint8_t Cdb[16];
+ uint8_t DeviceStatus;
#define ARCMSR_DEV_CHECK_CONDITION 0x02
#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
#define ARCMSR_DEV_ABORTED 0xF1
#define ARCMSR_DEV_INIT_FAIL 0xF2
- uint8_t SenseData[15];
+ uint8_t SenseData[15];
union
{
- struct SG32ENTRY sg32entry[1];
- struct SG64ENTRY sg64entry[1];
+ struct SG32ENTRY sg32entry[1];
+ struct SG64ENTRY sg64entry[1];
} u;
};
/*
@@ -480,13 +511,13 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
- uint32_t __iomem *drv2iop_doorbell;
- uint32_t __iomem *drv2iop_doorbell_mask;
- uint32_t __iomem *iop2drv_doorbell;
- uint32_t __iomem *iop2drv_doorbell_mask;
- uint32_t __iomem *message_rwbuffer;
- uint32_t __iomem *message_wbuffer;
- uint32_t __iomem *message_rbuffer;
+ uint32_t __iomem *drv2iop_doorbell;
+ uint32_t __iomem *drv2iop_doorbell_mask;
+ uint32_t __iomem *iop2drv_doorbell;
+ uint32_t __iomem *iop2drv_doorbell_mask;
+ uint32_t __iomem *message_rwbuffer;
+ uint32_t __iomem *message_wbuffer;
+ uint32_t __iomem *message_rbuffer;
};
/*
*********************************************************************
@@ -506,7 +537,7 @@ struct MessageUnit_C{
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
- uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
@@ -518,12 +549,12 @@ struct MessageUnit_C{
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
- uint32_t inbound_free_list_index; /*0060 0063*/
- uint32_t inbound_post_list_index; /*0064 0067*/
- uint32_t outbound_free_list_index; /*0068 006B*/
- uint32_t outbound_post_list_index; /*006C 006F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t outbound_free_list_index; /*0068 006B*/
+ uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
- uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
@@ -531,7 +562,7 @@ struct MessageUnit_C{
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
- uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
@@ -553,10 +584,10 @@ struct MessageUnit_C{
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
- uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
- uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
@@ -614,115 +645,208 @@ struct MessageUnit_D {
u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
};
/*
+*********************************************************************
+** Messaging Unit (MU) of Type E processor(LSI)
+*********************************************************************
+*/
+struct MessageUnit_E{
+ uint32_t iobound_doorbell; /*0000 0003*/
+ uint32_t write_sequence_3xxx; /*0004 0007*/
+ uint32_t host_diagnostic_3xxx; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t reply_post_producer_index; /*0068 006B*/
+ uint32_t reply_post_consumer_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[34]; /*0100 0187*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t message_wbuffer[32]; /*2000 207F*/
+ uint32_t reserved3[32]; /*2080 20FF*/
+ uint32_t message_rbuffer[32]; /*2100 217F*/
+ uint32_t reserved4[32]; /*2180 21FF*/
+ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
+};
+
+typedef struct deliver_completeQ {
+ uint16_t cmdFlag;
+ uint16_t cmdSMID;
+ uint16_t cmdLMID; // reserved (0)
+ uint16_t cmdFlag2; // reserved (0)
+} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
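/*
 * Illustrative sketch (editorial, not part of this patch): each 8-byte
 * Type E completion entry returns the SMID the driver stamped into the
 * request (ccb->smid = pool index << 16, set in arcmsr_alloc_ccb_pool()
 * below), so a completion maps straight back to its CCB by pool index.
 * Assuming the IOP advances reply_post_producer_index and doneq_index is
 * the driver's consumer cursor, the drain loop looks roughly like:
 */
static void arcmsr_hbaE_doneq_drain_sketch(struct AdapterControlBlock *acb)
{
	struct MessageUnit_E __iomem *pmu = acb->pmuE;
	uint32_t producer = readl(&pmu->reply_post_producer_index) & 0xffff;

	while (acb->doneq_index != producer) {
		uint16_t smid = acb->pCompletionQ[acb->doneq_index].cmdSMID;
		struct CommandControlBlock *ccb = acb->pccb_pool[smid];

		/* complete ccb->pcmd, return the CCB to the free list, ... */
		(void)ccb;
		if (++acb->doneq_index >= acb->completionQ_entry)
			acb->doneq_index = 0;
	}
	/* publish how far we've consumed so the IOP can reuse entries */
	writel(acb->doneq_index, &pmu->reply_post_consumer_index);
}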
+/*
*******************************************************************************
** Adapter Control Block
*******************************************************************************
*/
struct AdapterControlBlock
{
- uint32_t adapter_type; /* adapter A,B..... */
- #define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
- #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
- #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
- #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
- u32 roundup_ccbsize;
- struct pci_dev * pdev;
- struct Scsi_Host * host;
- unsigned long vir2phy_offset;
+ uint32_t adapter_type; /* adapter A,B..... */
+#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
+#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
+#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
+#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
+#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
+ u32 roundup_ccbsize;
+ struct pci_dev * pdev;
+ struct Scsi_Host * host;
+ unsigned long vir2phy_offset;
/* Offset is used in making arc cdb physical to virtual calculations */
- uint32_t outbound_int_enable;
- uint32_t cdb_phyaddr_hi32;
- uint32_t reg_mu_acc_handle0;
- spinlock_t eh_lock;
- spinlock_t ccblist_lock;
- spinlock_t postq_lock;
- spinlock_t doneq_lock;
- spinlock_t rqbuffer_lock;
- spinlock_t wqbuffer_lock;
+ uint32_t outbound_int_enable;
+ uint32_t cdb_phyaddr_hi32;
+ uint32_t reg_mu_acc_handle0;
+ spinlock_t eh_lock;
+ spinlock_t ccblist_lock;
+ spinlock_t postq_lock;
+ spinlock_t doneq_lock;
+ spinlock_t rqbuffer_lock;
+ spinlock_t wqbuffer_lock;
union {
struct MessageUnit_A __iomem *pmuA;
struct MessageUnit_B *pmuB;
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
+ struct MessageUnit_E __iomem *pmuE;
};
/* message unit ATU inbound base address0 */
- void __iomem *mem_base0;
- void __iomem *mem_base1;
- uint32_t acb_flags;
+ void __iomem *mem_base0;
+ void __iomem *mem_base1;
+ uint32_t acb_flags;
u16 dev_id;
- uint8_t adapter_index;
- #define ACB_F_SCSISTOPADAPTER 0x0001
- #define ACB_F_MSG_STOP_BGRB 0x0002
- /* stop RAID background rebuild */
- #define ACB_F_MSG_START_BGRB 0x0004
- /* stop RAID background rebuild */
- #define ACB_F_IOPDATA_OVERFLOW 0x0008
- /* iop message data rqbuffer overflow */
- #define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
- /* message clear wqbuffer */
- #define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
- /* message clear rqbuffer */
- #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
- #define ACB_F_BUS_RESET 0x0080
- #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
+ uint8_t adapter_index;
+#define ACB_F_SCSISTOPADAPTER 0x0001
+#define ACB_F_MSG_STOP_BGRB 0x0002
+/* stop RAID background rebuild */
+#define ACB_F_MSG_START_BGRB 0x0004
+/* stop RAID background rebuild */
+#define ACB_F_IOPDATA_OVERFLOW 0x0008
+/* iop message data rqbuffer overflow */
+#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
+/* message clear wqbuffer */
+#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
+/* message clear rqbuffer */
+#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
+#define ACB_F_BUS_RESET 0x0080
+#define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
- #define ACB_F_IOP_INITED 0x0100
- /* iop init */
- #define ACB_F_ABORT 0x0200
- #define ACB_F_FIRMWARE_TRAP 0x0400
- struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
+#define ACB_F_IOP_INITED 0x0100
+/* iop init */
+#define ACB_F_ABORT 0x0200
+#define ACB_F_FIRMWARE_TRAP 0x0400
+#define ACB_F_MSG_GET_CONFIG 0x1000
+ struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
- struct list_head ccb_free_list;
+ struct list_head ccb_free_list;
/* head of free ccb list */
- atomic_t ccboutstandingcount;
+ atomic_t ccboutstandingcount;
/*The present outstanding command number that in the IOP that
waiting for being handled by FW*/
- void * dma_coherent;
+ void * dma_coherent;
/* dma_coherent used for memory free */
- dma_addr_t dma_coherent_handle;
+ dma_addr_t dma_coherent_handle;
/* dma_coherent_handle used for memory free */
- dma_addr_t dma_coherent_handle2;
- void *dma_coherent2;
- unsigned int uncache_size;
- uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
+ dma_addr_t dma_coherent_handle2;
+ void *dma_coherent2;
+ unsigned int uncache_size;
+ uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for read from 80331 */
- int32_t rqbuf_getIndex;
+ int32_t rqbuf_getIndex;
/* first of read buffer */
- int32_t rqbuf_putIndex;
+ int32_t rqbuf_putIndex;
/* last of read buffer */
- uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
+ uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for write to 80331 */
- int32_t wqbuf_getIndex;
+ int32_t wqbuf_getIndex;
/* first of write buffer */
- int32_t wqbuf_putIndex;
+ int32_t wqbuf_putIndex;
/* last of write buffer */
- uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
+ uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
/* id0 ..... id15, lun0...lun7 */
-#define ARECA_RAID_GONE 0x55
-#define ARECA_RAID_GOOD 0xaa
- uint32_t num_resets;
- uint32_t num_aborts;
- uint32_t signature;
- uint32_t firm_request_len;
- uint32_t firm_numbers_queue;
- uint32_t firm_sdram_size;
- uint32_t firm_hd_channels;
- uint32_t firm_cfg_version;
+#define ARECA_RAID_GONE 0x55
+#define ARECA_RAID_GOOD 0xaa
+ uint32_t num_resets;
+ uint32_t num_aborts;
+ uint32_t signature;
+ uint32_t firm_request_len;
+ uint32_t firm_numbers_queue;
+ uint32_t firm_sdram_size;
+ uint32_t firm_hd_channels;
+ uint32_t firm_cfg_version;
char firm_model[12];
char firm_version[20];
char device_map[20]; /*21,84-99*/
- struct work_struct arcmsr_do_message_isr_bh;
- struct timer_list eternal_timer;
+ struct work_struct arcmsr_do_message_isr_bh;
+ struct timer_list eternal_timer;
unsigned short fw_flag;
- #define FW_NORMAL 0x0000
- #define FW_BOG 0x0001
- #define FW_DEADLOCK 0x0010
- atomic_t rq_map_token;
- atomic_t ante_token_value;
- uint32_t maxOutstanding;
- int vector_count;
+#define FW_NORMAL 0x0000
+#define FW_BOG 0x0001
+#define FW_DEADLOCK 0x0010
+ atomic_t rq_map_token;
+ atomic_t ante_token_value;
+ uint32_t maxOutstanding;
+ int vector_count;
+ uint32_t maxFreeCCB;
+ struct timer_list refresh_timer;
+ uint32_t doneq_index;
+ uint32_t ccbsize;
+ uint32_t in_doorbell;
+ uint32_t out_doorbell;
+ uint32_t completionQ_entry;
+ pCompletion_Q pCompletionQ;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
@@ -732,29 +856,30 @@ struct AdapterControlBlock
*/
struct CommandControlBlock{
/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
- struct list_head list; /*x32: 8byte, x64: 16byte*/
- struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
- struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
- uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
- uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
- uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
- #define CCB_FLAG_READ 0x0000
- #define CCB_FLAG_WRITE 0x0001
- #define CCB_FLAG_ERROR 0x0002
- #define CCB_FLAG_FLUSHCACHE 0x0004
- #define CCB_FLAG_MASTER_ABORTED 0x0008
- uint16_t startdone; /*x32:2byte,x32:2byte*/
- #define ARCMSR_CCB_DONE 0x0000
- #define ARCMSR_CCB_START 0x55AA
- #define ARCMSR_CCB_ABORTED 0xAA55
- #define ARCMSR_CCB_ILLEGAL 0xFFFF
- #if BITS_PER_LONG == 64
+ struct list_head list; /*x32: 8byte, x64: 16byte*/
+ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
+ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
+ uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
+ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
+ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
+#define CCB_FLAG_READ 0x0000
+#define CCB_FLAG_WRITE 0x0001
+#define CCB_FLAG_ERROR 0x0002
+#define CCB_FLAG_FLUSHCACHE 0x0004
+#define CCB_FLAG_MASTER_ABORTED 0x0008
+ uint16_t startdone; /*x32:2byte,x32:2byte*/
+#define ARCMSR_CCB_DONE 0x0000
+#define ARCMSR_CCB_START 0x55AA
+#define ARCMSR_CCB_ABORTED 0xAA55
+#define ARCMSR_CCB_ILLEGAL 0xFFFF
+ uint32_t smid;
+#if BITS_PER_LONG == 64
/* ======================512+64 bytes======================== */
- uint32_t reserved[5]; /*24 byte*/
- #else
+ uint32_t reserved[4]; /*16 byte*/
+#else
/* ======================512+32 bytes======================== */
- uint32_t reserved; /*8 byte*/
- #endif
+ // uint32_t reserved; /*4 byte*/
+#endif
/* ======================================================= */
struct ARCMSR_CDB arcmsr_cdb;
};
@@ -788,13 +913,13 @@ struct SENSE_DATA
** Outbound Interrupt Status Register - OISR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
-#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
-#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
+#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
+#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
(ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
|ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
|ARCMSR_MU_OUTBOUND_DOORBELL_INT \
@@ -805,13 +930,13 @@ struct SENSE_DATA
** Outbound Interrupt Mask Register - OIMR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
-#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
-#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
+#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
+#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 21f6421536a0..75e828bd30e3 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,6 +75,26 @@ MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
+static int msix_enable = 1;
+module_param(msix_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
+
+static int msi_enable = 1;
+module_param(msi_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
+
+static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+module_param(host_can_queue, int, S_IRUGO);
+MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
+
+static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+module_param(cmd_per_lun, int, S_IRUGO);
+MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
+
+static int set_date_time = 0;
+module_param(set_date_time, int, S_IRUGO);
+MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
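/*
 * Usage note (editorial, not part of this patch): all five knobs are
 * S_IRUGO module parameters, i.e. read-only at runtime, so they must be
 * chosen at load time:
 *
 *	modprobe arcmsr host_can_queue=256 cmd_per_lun=64 set_date_time=1
 *
 * Out-of-range queue depths are clamped back to the defaults in
 * arcmsr_probe() below.
 */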
+
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
@@ -102,19 +122,19 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
+static void arcmsr_set_iop_datetime(struct timer_list *);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -127,15 +147,15 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.name = "Areca SAS/SATA RAID driver",
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
- .eh_abort_handler = arcmsr_abort,
+ .eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
- .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
- .this_id = ARCMSR_SCSI_INITIATOR_ID,
- .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
- .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
- .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
+ .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
+ .this_id = ARCMSR_SCSI_INITIATOR_ID,
+ .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
+ .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
+ .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
@@ -184,13 +204,15 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
.driver_data = ACB_ADAPTER_TYPE_C},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
+ .driver_data = ACB_ADAPTER_TYPE_E},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
- .id_table = arcmsr_device_id_table,
+ .id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
.suspend = arcmsr_suspend,
@@ -206,7 +228,8 @@ static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
- case ACB_ADAPTER_TYPE_D: {
+ case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E: {
dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
@@ -271,6 +294,20 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
acb->mem_base0 = mem_base0;
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ acb->pmuE = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!acb->pmuE) {
+			pr_notice("arcmsr%d: memory mapping region failed\n",
+ acb->host->host_no);
+ return false;
+ }
+ writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ break;
+ }
}
return true;
}
@@ -295,6 +332,9 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
iounmap(acb->mem_base0);
break;
+ case ACB_ADAPTER_TYPE_E:
+ iounmap(acb->pmuE);
+ break;
}
}
@@ -408,6 +448,24 @@ static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
return false;
}
+static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ int i;
+ uint32_t read_doorbell;
+ struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
+
+ for (i = 0; i < 2000; i++) {
+ read_doorbell = readl(&phbcmu->iobound_doorbell);
+ if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(0, &phbcmu->host_int_status); /*clear interrupt*/
+ pACB->in_doorbell = read_doorbell;
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+ return false;
+}
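/*
 * Editorial sketch (not part of this patch): Type E doorbells are
 * edge-coded. The driver keeps shadow copies (in_doorbell/out_doorbell)
 * and signals by *toggling* the MESSAGE_CMD_DONE bit; the waiter above
 * spots the IOP's reply by XOR-ing the fresh register value against its
 * cached copy (2000 polls x 10 ms gives the 20-second budget noted above).
 * Every Type E message in this patch therefore reduces to one pattern:
 */
static bool arcmsr_hbaE_send_msg0_sketch(struct AdapterControlBlock *pACB,
					 uint32_t msg)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	writel(msg, &reg->inbound_msgaddr0);		/* which command */
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);	/* ring it */
	return arcmsr_hbaE_wait_msgint_ready(pACB);
}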
+
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -475,6 +533,24 @@ static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
} while (retry_count != 0);
}
+static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
+{
+ int retry_count = 30;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ do {
+ if (arcmsr_hbaE_wait_msgint_ready(pACB))
+ break;
+ retry_count--;
+ pr_notice("arcmsr%d: wait 'flush adapter "
+ "cache' timeout, retry count down = %d\n",
+ pACB->host->host_no, retry_count);
+ } while (retry_count != 0);
+}
+
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -495,6 +571,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_flush_cache(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_flush_cache(acb);
+ break;
}
}
@@ -577,6 +656,23 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ uint32_t completeQ_size;
+ completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
+ acb->roundup_ccbsize = roundup(completeQ_size, 32);
+ dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent){
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ acb->pCompletionQ = dma_coherent;
+ acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ }
+ break;
default:
break;
}
@@ -610,7 +706,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->max_sectors = max_xfer_len/512;
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
- acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+ acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -619,9 +715,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->dma_coherent = dma_coherent;
acb->dma_coherent_handle = dma_coherent_handle;
memset(dma_coherent, 0, acb->uncache_size);
+ acb->ccbsize = roundup_ccbsize;
ccb_tmp = dma_coherent;
acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
- for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
+ for(i = 0; i < acb->maxFreeCCB; i++){
cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
@@ -630,11 +727,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E:
ccb_tmp->cdb_phyaddr = cdb_phyaddr;
break;
}
acb->pccb_pool[i] = ccb_tmp;
ccb_tmp->acb = acb;
+ ccb_tmp->smid = (u32)i << 16;
INIT_LIST_HEAD(&ccb_tmp->list);
list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
@@ -654,6 +753,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
struct scsi_device *psdev;
char diff, temp;
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -683,6 +783,13 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
}
atomic_inc(&acb->rq_map_token);
if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
@@ -723,17 +830,26 @@ arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
unsigned long flags;
int nvec, i;
+ if (msix_enable == 0)
+ goto msi_int0;
nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
PCI_IRQ_MSIX);
if (nvec > 0) {
pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
flags = 0;
} else {
- nvec = pci_alloc_irq_vectors(pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+msi_int0:
+ if (msi_enable == 1) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1) {
+ dev_info(&pdev->dev, "msi enabled\n");
+ goto msi_int1;
+ }
+ }
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
if (nvec < 1)
return FAILED;
-
+msi_int1:
flags = IRQF_SHARED;
}
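/*
 * Editorial note (not part of this patch): the goto ladder above encodes a
 * strict MSI-X -> MSI -> INTx fallback, each stage individually defeatable
 * through msix_enable/msi_enable. Without per-stage control the same policy
 * would collapse to a single call, e.g.:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
 *			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *
 * but the explicit ladder is kept so users can pin the interrupt mode.
 */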
@@ -755,6 +871,24 @@ out_free_irq:
return FAILED;
}
+static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
+{
+ INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&pacb->rq_map_token, 16);
+ atomic_set(&pacb->ante_token_value, 16);
+ pacb->fw_flag = FW_NORMAL;
+ timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
+ pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ add_timer(&pacb->eternal_timer);
+}
+
+static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
+{
+ timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
+ pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ add_timer(&pacb->refresh_timer);
+}
+
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -789,8 +923,12 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_lun = ARCMSR_MAX_TARGETLUN;
host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
- host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
- host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
+ host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+ host->can_queue = host_can_queue; /* max simultaneous cmds */
+ if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
+ cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+ host->cmd_per_lun = cmd_per_lun;
host->this_id = ARCMSR_SCSI_INITIATOR_ID;
host->unique_id = (bus << 8) | dev_fun;
pci_set_drvdata(pdev, host);
@@ -833,18 +971,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto scsi_host_remove;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
scsi_scan_host(host);
return 0;
out_free_sysfs:
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
del_timer_sync(&acb->eternal_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
@@ -887,6 +1023,8 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -924,13 +1062,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
return 0;
controller_stop:
arcmsr_stop_adapter_bgrb(acb);
@@ -998,6 +1132,21 @@ static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
return true;
}
+static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'abort all outstanding "
+ "command' timeout\n", pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
@@ -1020,6 +1169,9 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtnval = arcmsr_hbaD_abort_allcmd(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtnval = arcmsr_hbaE_abort_allcmd(acb);
+ break;
}
return rtnval;
}
@@ -1050,7 +1202,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
struct scsi_cmnd *pcmd = ccb->pcmd;
struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = DID_OK << 16;
+ pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (sensebuffer) {
int sense_data_length =
sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
@@ -1059,6 +1211,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
+ pcmd->result |= (DRIVER_SENSE << 24);
}
}
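/*
 * Editorial note (not part of this patch): the result word built above uses
 * the classic SCSI midlayer layout
 *
 *	(driver_byte << 24) | (host_byte << 16) | (msg_byte << 8) | status
 *
 * CHECK_CONDITION (0x01) is the legacy right-shifted status code, so
 * CHECK_CONDITION << 1 yields the SAM status 0x02; DRIVER_SENSE is OR-ed in
 * only once sense data has actually been copied out for the midlayer.
 */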
@@ -1092,6 +1245,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ orig_mask = readl(&reg->host_int_mask);
+ writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
+ readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
+ }
+ break;
}
return orig_mask;
}
@@ -1196,7 +1356,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
- && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ && (i++ < acb->maxOutstanding)) {
pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1226,7 +1386,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
- while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
/*need to do*/
flag_ccb = readl(&reg->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
@@ -1280,6 +1440,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
pmu->doneq_index = 0x40FF;
}
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_postqueue_isr(acb);
+ break;
}
}
@@ -1293,13 +1456,15 @@ static void arcmsr_remove(struct pci_dev *pdev)
scsi_remove_host(host);
flush_work(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
- for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
+ for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
@@ -1311,7 +1476,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -1335,6 +1500,8 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
flush_work(&acb->arcmsr_do_message_isr_bh);
@@ -1396,6 +1563,13 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
writel(intmask_org | mask, reg->pcief0_int_enable);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
+ writel(intmask_org & mask, &reg->host_int_mask);
+ break;
+ }
}
}
@@ -1527,6 +1701,16 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
spin_unlock_irqrestore(&acb->postq_lock, flags);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *pmu = acb->pmuE;
+ u32 ccb_post_stamp, arc_cdb_size;
+
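+ /*
+ * Post by SMID: the stamp packs the CCB's SMID with the CDB
+ * size in 64-byte units (capped at 0x300 bytes) and goes out
+ * through the low inbound queue port.
+ */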
+ arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
+ ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
+ writel(0, &pmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+ break;
+ }
}
}
@@ -1580,6 +1764,20 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
"timeout\n", pACB->host->host_no);
}
+static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ "timeout\n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -1599,6 +1797,9 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_stop_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_stop_bgrb(acb);
+ break;
}
}
@@ -1633,6 +1834,12 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1673,6 +1880,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1702,6 +1915,11 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -1732,6 +1950,11 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
+ }
+ break;
}
return pqbuffer;
}
@@ -1785,7 +2008,7 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
uint8_t __iomem *iop_data;
uint32_t iop_len;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = readl(&prbuffer->data_len);
@@ -1871,7 +2094,7 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
arcmsr_write_ioctldata2iop_in_DWORD(acb);
return;
}
@@ -1968,6 +2191,33 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
+static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell, in_doorbell, tmp;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
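+ /*
+ * Doorbell bits are toggle-coded, so XOR the freshly read value
+ * against the cached copy to recover which events fired since
+ * the last pass.
+ */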
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
+ do {
+ writel(0, &reg->host_int_status); /* clear interrupt */
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
+ arcmsr_iop2drv_data_read_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ arcmsr_hbaE_message_isr(pACB);
+ }
+ tmp = in_doorbell;
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = tmp ^ in_doorbell;
+ } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
+ | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
+ pACB->in_doorbell = in_doorbell;
+}
+
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t flag_ccb;
@@ -2077,6 +2327,33 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t doneq_index;
+ uint16_t cmdSMID;
+ int error;
+ struct MessageUnit_E __iomem *pmu;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
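+ /*
+ * Walk the host-memory completion ring from our consumer index
+ * up to the producer index written by the IOP, resolving each
+ * entry's SMID to its CCB, then publish the consumer index back.
+ */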
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ pmu = acb->pmuE;
+ while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ ccb = acb->pccb_pool[cmdSMID];
+ error = (acb->pCompletionQ[doneq_index].cmdFlag
+ & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ }
+ acb->doneq_index = doneq_index;
+ writel(doneq_index, &pmu->reply_post_consumer_index);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
/*
**********************************************************************************
** Handle a message interrupt
@@ -2090,7 +2367,8 @@ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_A __iomem *reg = acb->pmuA;
/*clear interrupt and message state*/
writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
@@ -2098,7 +2376,8 @@ static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
@@ -2114,7 +2393,8 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_C __iomem *reg = acb->pmuC;
/*clear interrupt and message state*/
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
@@ -2123,7 +2403,17 @@ static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
readl(reg->outbound_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ writel(0, &reg->host_int_status);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
@@ -2229,6 +2519,31 @@ static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
return IRQ_HANDLED;
}
+static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ host_interrupt_status = readl(&pmu->host_int_status) &
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU ioctl transfer doorbell interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
+ arcmsr_hbaE_doorbell_isr(pACB);
+ }
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
+ arcmsr_hbaE_postqueue_isr(pACB);
+ }
+ host_interrupt_status = readl(&pmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -2242,6 +2557,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
return arcmsr_hbaC_handle_isr(acb);
case ACB_ADAPTER_TYPE_D:
return arcmsr_hbaD_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_E:
+ return arcmsr_hbaE_handle_isr(acb);
default:
return IRQ_NONE;
}
@@ -2636,74 +2953,66 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
- char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
- char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
int count;
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
- printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
- miscellaneous data' timeout \n", acb->host->host_no);
- return false;
- }
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
+ uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
+ uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
+ uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
+ uint32_t *firm_model = &rwbuffer[15];
+ uint32_t *firm_version = &rwbuffer[17];
+ uint32_t *device_map = &rwbuffer[21];
+
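+ /*
+ * Common message-buffer layout (per the per-type comments this
+ * helper replaces): words 15-16 hold the model string, 17-20 the
+ * firmware version, 21-24 the device map; words 0-4 and 25 carry
+ * the numeric configuration fields read below.
+ */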
+ count = 2;
+ while (count) {
+ *acb_firm_model = readl(firm_model);
acb_firm_model++;
- iop_firm_model++;
+ firm_model++;
count--;
}
-
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
+ count = 4;
+ while (count) {
+ *acb_firm_version = readl(firm_version);
acb_firm_version++;
- iop_firm_version++;
+ firm_version++;
count--;
}
-
- count=16;
- while(count){
- *acb_device_map = readb(iop_device_map);
+ count = 4;
+ while (count) {
+ *acb_device_map = readl(device_map);
acb_device_map++;
- iop_device_map++;
+ device_map++;
count--;
}
+ pACB->signature = readl(&rwbuffer[0]);
+ pACB->firm_request_len = readl(&rwbuffer[1]);
+ pACB->firm_numbers_queue = readl(&rwbuffer[2]);
+ pACB->firm_sdram_size = readl(&rwbuffer[3]);
+ pACB->firm_hd_channels = readl(&rwbuffer[4]);
+ pACB->firm_cfg_version = readl(&rwbuffer[25]);
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
- acb->signature = readl(&reg->message_rwbuffer[0]);
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ pACB->host->host_no,
+ pACB->firm_model,
+ pACB->firm_version);
+}
+
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ arcmsr_wait_firmware_ready(acb);
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
+ miscellaneous data' timeout \n", acb->host->host_no);
+ return false;
+ }
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- /*firm_model,15,60-67*/
- char __iomem *iop_firm_version;
- /*firm_version,17,68-83*/
- char __iomem *iop_device_map;
- /*firm_version,21,84-99*/
- int count;
-
- iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
- iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
- iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
arcmsr_wait_firmware_ready(acb);
writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
@@ -2717,127 +3026,43 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
miscellaneous data' timeout \n", acb->host->host_no);
return false;
}
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
-
- count = 16;
- while(count){
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
- }
-
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
-
- acb->signature = readl(&reg->message_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- /*firm_ide_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*firm_ide_channels,4,16-19*/
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
- uint32_t intmask_org, Index, firmware_state = 0;
+ uint32_t intmask_org;
struct MessageUnit_C __iomem *reg = pACB->pmuC;
- char *acb_firm_model = pACB->firm_model;
- char *acb_firm_version = pACB->firm_version;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
- char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
- int count;
+
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
/* wait firmware ready */
- do {
- firmware_state = readl(&reg->outbound_msgaddr1);
- } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
+ arcmsr_wait_firmware_ready(pACB);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
/* wait message ready */
- for (Index = 0; Index < 2000; Index++) {
- if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
- writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
- break;
- }
- udelay(10);
- } /*max 1 seconds*/
- if (Index >= 2000) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- pACB->host->host_no,
- pACB->firm_model,
- pACB->firm_version);
- pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
- pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
- pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
- pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
- pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*all interrupt service will be enable at arcmsr_iop_init*/
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- char __iomem *iop_firm_version;
- char __iomem *iop_device_map;
- u32 count;
struct MessageUnit_D *reg = acb->pmuD;
- iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
- iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
- iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
if (readl(acb->pmuD->outbound_doorbell) &
ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
acb->pmuD->outbound_doorbell);/*clear interrupt*/
}
+ arcmsr_wait_firmware_ready(acb);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
/* wait message ready */
@@ -2846,42 +3071,33 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
"miscellaneous data timeout\n", acb->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
+ arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
+ return true;
+}
+
+static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+ uint32_t intmask_org;
+
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ arcmsr_wait_firmware_ready(pACB);
+ mdelay(20);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ /* wait message ready */
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait get adapter firmware "
+ "miscellaneous data timeout\n", pACB->host->host_no);
+ return false;
}
- acb->signature = readl(&reg->msgcode_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
- /*firm_hd_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
@@ -2902,14 +3118,20 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_get_config(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_get_config(acb);
+ break;
default:
break;
}
- if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
- acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
+ acb->maxOutstanding = acb->firm_numbers_queue - 1;
+ if (acb->host->can_queue >= acb->firm_numbers_queue)
+ acb->host->can_queue = acb->maxOutstanding;
else
- acb->maxOutstanding = acb->firm_numbers_queue - 1;
- acb->host->can_queue = acb->maxOutstanding;
+ acb->maxOutstanding = acb->host->can_queue;
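+ /* size the CCB pool from can_queue; pools below ARCMSR_MAX_FREECCB_NUM get 64 spares */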
+ acb->maxFreeCCB = acb->host->can_queue;
+ if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
+ acb->maxFreeCCB += 64;
return rtn;
}
@@ -3166,6 +3388,75 @@ polling_hbaD_ccb_retry:
return rtn;
}
+static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ bool error;
+ uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
+ uint16_t cmdSMID;
+ unsigned long flags;
+ int rtn;
+ struct CommandControlBlock *pCCB;
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ polling_hbaE_ccb_retry:
+ poll_count++;
+ while (1) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
+ doneq_index) {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 40) {
+ rtn = FAILED;
+ break;
+ }
+ goto polling_hbaE_ccb_retry;
+ }
+ }
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ /* latch this entry's completion flag before the index advances */
+ error = (acb->pCompletionQ[doneq_index].cmdFlag &
+ ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ acb->doneq_index = doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ pCCB = acb->pccb_pool[cmdSMID];
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ /* check if command done with no error*/
+ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ pr_notice("arcmsr%d: scsi id = %d "
+ "lun = %d ccb = '0x%p' poll command "
+ "abort successfully\n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ pr_notice("arcmsr%d: polling an illegal "
+ "ccb command done ccb = '0x%p' "
+ "ccboutstandingcount = %d\n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+ arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ writel(doneq_index, &reg->reply_post_consumer_index);
+ return rtn;
+}
+
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
@@ -3188,10 +3479,95 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
+ break;
}
return rtn;
}
+static void arcmsr_set_iop_datetime(struct timer_list *t)
+{
+ struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
+ unsigned int next_time;
+ struct tm tm;
+
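+ /*
+ * The a/b views of this union pack the 0x55AA signature and six
+ * date/time fields into the two 32-bit words the SYNC_TIMER
+ * message carries.
+ */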
+ union {
+ struct {
+ uint16_t signature;
+ uint8_t year;
+ uint8_t month;
+ uint8_t date;
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ } a;
+ struct {
+ uint32_t msg_time[2];
+ } b;
+ } datetime;
+
+ time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
+
+ datetime.a.signature = 0x55AA;
+ datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
+ datetime.a.month = tm.tm_mon;
+ datetime.a.date = tm.tm_mday;
+ datetime.a.hour = tm.tm_hour;
+ datetime.a.minute = tm.tm_min;
+ datetime.a.second = tm.tm_sec;
+
+ switch (pacb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = pacb->pmuA;
+ writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_B *reg = pacb->pmuB;
+ rwbuffer = reg->message_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = pacb->pmuC;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_D *reg = pacb->pmuD;
+ rwbuffer = reg->msgcode_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = pacb->pmuE;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ }
+ if (sys_tz.tz_minuteswest)
+ next_time = ARCMSR_HOURS;
+ else
+ next_time = ARCMSR_MINUTES;
+ mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
+}
+
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
@@ -3208,6 +3584,10 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
dma_coherent_handle = acb->dma_coherent_handle2;
break;
+ case ACB_ADAPTER_TYPE_E:
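+ /*
+ * Type E completions carry SMIDs rather than CDB addresses,
+ * so report the region starting at the arcmsr_cdb member of
+ * the first CCB.
+ */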
+ dma_coherent_handle = acb->dma_coherent_handle +
+ offsetof(struct CommandControlBlock, arcmsr_cdb);
+ break;
default:
dma_coherent_handle = acb->dma_coherent_handle;
break;
@@ -3316,6 +3696,29 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
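+ /*
+ * Type E SET_CONFIG layout: words 2-4 carry the CCB region base
+ * and per-CCB size, words 5-7 the second DMA region (the
+ * completion ring drained by arcmsr_hbaE_postqueue_isr) and its
+ * rounded-up size.
+ */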
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
+ writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
+ writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
+ dma_coherent_handle = acb->dma_coherent_handle2;
+ cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
+ cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
+ writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+ pr_notice("arcmsr%d: 'set command Q window' timeout \n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
@@ -3356,83 +3759,22 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
}
break;
- }
-}
-
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_B *reg = acb->pmuB;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_C __iomem *reg = acb->pmuC;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ break;
}
- return;
}
-static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct MessageUnit_D *reg = acb->pmuD;
-
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
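+ /*
+ * Periodic device-map refresh: skip while a reset or abort is in
+ * flight; otherwise post GET_CONFIG for this adapter type and
+ * set ACB_F_MSG_GET_CONFIG so the message ISR schedules the
+ * bottom half when the reply arrives.
+ */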
if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
- ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
- ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ (acb->acb_flags & ACB_F_BUS_RESET) ||
+ (acb->acb_flags & ACB_F_ABORT)) {
mod_timer(&acb->eternal_timer,
jiffies + msecs_to_jiffies(6 * HZ));
} else {
@@ -3448,32 +3790,40 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
msecs_to_jiffies(6 * HZ));
return;
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
- reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies +
- msecs_to_jiffies(6 * HZ));
- }
-}
-
-static void arcmsr_request_device_map(struct timer_list *t)
-{
- struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
- switch (acb->adapter_type) {
+ switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_hbaA_request_device_map(acb);
- }
- break;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ break;
+ }
case ACB_ADAPTER_TYPE_B: {
- arcmsr_hbaB_request_device_map(acb);
- }
- break;
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
+ break;
+ }
case ACB_ADAPTER_TYPE_C: {
- arcmsr_hbaC_request_device_map(acb);
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ default:
+ return;
}
- break;
- case ACB_ADAPTER_TYPE_D:
- arcmsr_hbaD_request_device_map(acb);
- break;
+ acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
}
@@ -3524,6 +3874,20 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
}
}
+static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &pmu->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'start adapter "
+ "background rebulid' timeout \n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -3539,6 +3903,9 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_start_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_start_bgrb(acb);
+ break;
}
}
@@ -3558,10 +3925,19 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
- /*clear interrupt and message state*/
- writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ uint32_t outbound_doorbell, i;
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ outbound_doorbell = readl(reg->iop2drv_doorbell);
+ if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
+ } else
+ break;
+ }
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -3607,6 +3983,27 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ uint32_t i, tmp;
+
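+ /*
+ * Drain stale ioctl data: keep acknowledging with DATA_READ_OK
+ * toggles until the IOP stops flipping DATA_WRITE_OK, leaving
+ * the Qbuffer empty.
+ */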
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ for (i = 0; i < 200; i++) {
+ msleep(20);
+ tmp = acb->in_doorbell;
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ if ((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
}
}
@@ -3658,6 +4055,19 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
writel(0xD, &pmuC->write_sequence);
} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+ } else if (acb->dev_id == 0x1884) {
+ struct MessageUnit_E __iomem *pmuE = acb->pmuE;
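+ /*
+ * Unlock diagnostic writes with the 4/B/2/7/D magic sequence,
+ * then request the reset through host_diagnostic_3xxx.
+ */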
+ do {
+ count++;
+ writel(0x4, &pmuE->write_sequence_3xxx);
+ writel(0xB, &pmuE->write_sequence_3xxx);
+ writel(0x2, &pmuE->write_sequence_3xxx);
+ writel(0x7, &pmuE->write_sequence_3xxx);
+ writel(0xD, &pmuE->write_sequence_3xxx);
+ mdelay(10);
+ } while (((readl(&pmuE->host_diagnostic_3xxx) &
+ ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
+ writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
} else if ((acb->dev_id == 0x1214)) {
writel(0x20, pmuD->reset_request);
} else {
@@ -3671,6 +4081,45 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
msleep(1000);
return;
}
+
+static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
+{
+ bool rtn = true;
+
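+ /*
+ * Each adapter type exposes reset completion differently; return
+ * true while the controller still looks to be in reset.
+ */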
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ rtn = ((readl(&reg->outbound_msgaddr1) &
+ ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B:{
+ struct MessageUnit_B *reg = acb->pmuB;
+ rtn = ((readl(reg->iop2drv_doorbell) &
+ ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C:{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:{
+ struct MessageUnit_D *reg = acb->pmuD;
+ rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
+ true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_E:{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ rtn = (readl(&reg->host_diagnostic_3xxx) &
+ ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
+ }
+ break;
+ }
+ return rtn;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
@@ -3703,7 +4152,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
rtnval = arcmsr_abort_allcmd(acb);
/* clear all outbound posted Q */
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
scsi_dma_unmap(ccb->pcmd);
@@ -3725,197 +4174,55 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterControlBlock *acb;
- uint32_t intmask_org, outbound_doorbell;
int retry_count = 0;
int rtn = FAILED;
acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
- printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
+ pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
+ " num_aborts = %d \n", acb->num_resets, acb->num_aborts);
acb->num_resets++;
- switch(acb->adapter_type){
- case ACB_ADAPTER_TYPE_A:{
- if (acb->acb_flags & ACB_F_BUS_RESET){
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_A __iomem *reg;
- reg = acb->pmuA;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep_again:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep_again;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
- writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_B:{
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = FAILED;
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_C:{
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_C __iomem *reg;
- reg = acb->pmuC;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- arcmsr_clear_doorbell_queue_buffer(acb);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_D: {
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- pr_notice("arcmsr: there is an bus reset"
- " eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags
- & ACB_F_BUS_RESET) == 0, 220 * HZ);
- if (timeout)
- return SUCCESS;
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_D *reg;
- reg = acb->pmuD;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
- nap:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(reg->sample_at_reset) & 0x80) != 0) {
- pr_err("arcmsr%d: waiting for "
- "hw bus reset return, retry=%d\n",
- acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- pr_err("arcmsr%d: waiting for hw bus"
- " reset return, "
- "RETRY TERMINATED!!\n",
- acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto nap;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- arcmsr_clear_doorbell_queue_buffer(acb);
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- pr_err("arcmsr: scsi bus reset "
- "eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags
+ & ACB_F_BUS_RESET) == 0, 220 * HZ);
+ if (timeout)
+ return SUCCESS;
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
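+ /*
+ * Soft reset failed: force a hardware reset, then sleep-poll the
+ * per-type reset-in-progress probe before reinitializing the IOP.
+ */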
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+wait_reset_done:
+ ssleep(ARCMSR_SLEEPTIME);
+ if (arcmsr_reset_in_progress(acb)) {
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ pr_notice("arcmsr%d: waiting for hw bus reset"
+ " return, RETRY TERMINATED!!\n",
+ acb->host->host_no);
+ return FAILED;
}
- break;
+ retry_count++;
+ goto wait_reset_done;
}
+ arcmsr_iop_init(acb);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ pr_notice("arcmsr: scsi bus reset eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
}
return rtn;
}
@@ -3953,7 +4260,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
}
intmask_org = arcmsr_disable_outbound_ints(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -3999,6 +4306,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
case PCI_DEVICE_ID_ARECA_1880:
+ case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
default:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index f4775ca70bab..27bda2b05de6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* have valid data in the sense buffer that could
* confuse the higher levels.
*/
- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
/*
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 3e1caec82554..10a63be92544 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1957,7 +1957,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
};
- *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
+ *npciids = ARRAY_SIZE(__pciids);
*pciids = __pciids;
}
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index df6760ca0911..9685efc59b16 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -35,10 +35,10 @@
#define BFA_TRC_TS(_trcm) \
({ \
- struct timeval tv; \
+ struct timespec64 ts; \
\
- do_gettimeofday(&tv); \
- (tv.tv_sec*1000000+tv.tv_usec); \
+ ktime_get_ts64(&ts); \
+ (ts.tv_sec * 1000000 + ts.tv_nsec / 1000); \
})
#ifndef BFA_TRC_TS
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e81707f938cb..3d0c96a5c873 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1455,7 +1455,8 @@ struct bfa_aen_entry_s {
enum bfa_aen_category aen_category;
u32 aen_type;
union bfa_aen_data_u aen_data;
- struct timeval aen_tv;
+ u64 aen_tv_sec;
+ u64 aen_tv_usec;
u32 seq_num;
u32 bfad_num;
};
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b8dadc9cc993..d3b00a475aeb 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- rspnid->spn_len = (u8) strlen((char *)name);
- strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
+ strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- rsnn_nn->snn_len = (u8) strlen((char *)name);
- strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+ strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 5f53b3276234..2c85f5b1f9c1 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -468,7 +468,7 @@ bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
}
bfa_status_t
-bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
struct bfa_itnim_s *itnim;
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
@@ -1478,6 +1478,7 @@ bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
return BFA_STATUS_IOPROFILE_OFF;
itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ /* unsigned 32-bit time_t overflow here in y2106 */
itnim->ioprofile.io_profile_start_time =
bfa_io_profile_start_time(itnim->bfa);
itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index e93921dec347..ec8f863540ae 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -136,7 +136,7 @@ struct bfa_fcpim_s {
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
bfa_boolean_t ioredirect;
bfa_boolean_t io_profile;
- u32 io_profile_start_time;
+ time64_t io_profile_start_time;
bfa_fcpim_profile_t profile_comp;
bfa_fcpim_profile_t profile_start;
};
@@ -310,7 +310,7 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 4aa61e20e82d..932feb0ed4da 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcpy(port_cfg->sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname, driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/*
* Host OS Info :
@@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_OS_STR_LEN);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
} else {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Append host OS Patch Info */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_patch,
- BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_patch,
+ BFA_SYMNAME_MAXLEN);
}
/* null terminate */
@@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->node_sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcpy(port_cfg->node_sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->node_sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
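[Editorial note] The hunks above all fix the same class of bug: strncat() was bounded by the size of the string being appended (or by sizeof() of the separator literal) rather than by the space remaining in the destination, so a long model/version/hostname could overrun sym_name. strlcpy()/strlcat() instead take the total destination size and always NUL-terminate. A minimal sketch of the corrected pattern; the buffer size and names are illustrative, not driver constants:

	#include <linux/string.h>

	#define SYM_LEN 256	/* stand-in for BFA_SYMNAME_MAXLEN */

	static void build_sym_name(char *dst, const char *model, const char *version)
	{
		/* each call is bounded by the destination size and the
		 * result is always NUL-terminated, truncating if needed */
		strlcpy(dst, model, SYM_LEN);
		strlcat(dst, " | ", SYM_LEN);
		strlcat(dst, version, SYM_LEN);
	}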
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 638c0a2857f7..b4f2c1d8742e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strncpy(hba_attr->driver_version, (char *)driver_info->version,
+ strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strncpy(hba_attr->os_name, driver_info->host_os_name,
+ strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
- strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
- strncat(hba_attr->os_name, driver_info->host_os_patch,
- sizeof(driver_info->host_os_patch));
+ strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(hba_attr->os_name));
+ strlcat(hba_attr->os_name, driver_info->host_os_patch,
+ sizeof(hba_attr->os_name));
}
/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strncpy(hba_attr->node_sym_name.symname,
+ strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
+ strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
+ strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strncpy(port_attr->port_sym_name.symname,
- (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+ strlcpy(port_attr->port_sym_name.symname,
+ bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strncpy((char *)psymbl,
- (char *) &
- (bfa_fcs_lport_get_psym_name
+ strlcpy(symbl,
+ (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs)))] = 0;
- strncat((char *)psymbl,
- (char *) &(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
+
+ strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
@@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
- u8 *psymbl = &symbl[0];
int len;
/* Avoid sending RSPN in the following states. */
@@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs)))] = 0;
+ sizeof(symbl));
- strncat((char *)psymbl,
+ strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
- bfa_fcs_lport_get_fcid(port), 0, psymbl);
+ bfa_fcs_lport_get_fcid(port), 0, symbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
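[Editorial note] The RSPN hunks also drop the manual termination dance: strncat() bounded by strlen(source) copies the entire source regardless of how small the destination is, so the old bound was no bound at all, and the explicit "Ensure we have a null terminating string" store only papered over that. With the fixed on-stack buffer the sequence reduces to the following sketch (names illustrative):

	#include <linux/string.h>

	static void build_vport_rspn(const char *base_sym, const char *vport_sym)
	{
		char symbl[256];	/* same size as the on-stack buffer above */

		strlcpy(symbl, base_sym, sizeof(symbl));
		strlcat(symbl, vport_sym, sizeof(symbl));
		/* symbl is NUL-terminated here, safe for fc_rspnid_build() */
	}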
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 256f4afaccf9..16d3aeb0e572 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1809,13 +1809,12 @@ static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
- struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
- do_gettimeofday(&tv);
- enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -1826,6 +1825,9 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
+ disable_req.clscode = cpu_to_be16(ioc->clscode);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -2803,7 +2805,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
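[Editorial note] The timestamp hunks are part of the kernel-wide y2038 cleanup: do_gettimeofday() fills a struct timeval whose tv_sec is a 32-bit time_t on 32-bit architectures, while ktime_get_real_seconds() returns a full time64_t. The firmware message field stays 32 bits wide, which is what the added "overflow in y2106" comments acknowledge. A sketch of the stamping pattern; the struct is illustrative, and cpu_to_be32() performs the identical byte swap to the be32_to_cpu() used in the hunk:

	#include <linux/types.h>
	#include <linux/timekeeping.h>
	#include <asm/byteorder.h>

	struct fw_ctrl_req {		/* illustrative 32-bit wire field */
		__be32 tv_sec;
	};

	static void fw_ctrl_req_stamp(struct fw_ctrl_req *req)
	{
		time64_t now = ktime_get_real_seconds();

		/* deliberate truncation to 32 bits: wraps in 2106 when
		 * the consumer treats the field as unsigned */
		req->tv_sec = cpu_to_be32((u32)now);
	}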
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index da1721e0d167..079bc77f4102 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -96,14 +96,11 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_busy = BFA_FALSE;
if (status == BFA_STATUS_OK) {
- struct timeval tv;
-
memcpy(port->stats, port->stats_dma.kva,
sizeof(union bfa_port_stats_u));
bfa_port_stats_swap(port, port->stats);
- do_gettimeofday(&tv);
- port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
+ port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time;
}
if (port->stats_cbfn) {
@@ -124,16 +121,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
- struct timeval tv;
-
port->stats_status = status;
port->stats_busy = BFA_FALSE;
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
if (port->stats_cbfn) {
port->stats_cbfn(port->stats_cbarg, status);
@@ -471,8 +465,6 @@ void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod)
{
- struct timeval tv;
-
WARN_ON(!port);
port->dev = dev;
@@ -494,8 +486,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
bfa_trc(port, 0);
}
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 26dc1bf14c85..0c3b200243ca 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -36,7 +36,7 @@ struct bfa_port_s {
bfa_port_stats_cbfn_t stats_cbfn;
void *stats_cbarg;
bfa_status_t stats_status;
- u32 stats_reset_time;
+ time64_t stats_reset_time;
union bfa_port_stats_u *stats;
struct bfa_dma_s stats_dma;
bfa_boolean_t endis_pending;
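[Editorial note] Note the split between clock sources in these conversions: stats_reset_time only ever feeds an elapsed-time subtraction (secs_reset), so it moves to ktime_get_seconds(), the coarse monotonic clock that cannot jump when the wall clock is stepped, while absolute event timestamps elsewhere in the series use ktime_get_real_seconds(). Sketch, with illustrative names:

	#include <linux/timekeeping.h>

	static time64_t stats_reset_time;

	static void stats_clear(void)
	{
		/* monotonic seconds: immune to settimeofday()/NTP steps */
		stats_reset_time = ktime_get_seconds();
	}

	static time64_t stats_secs_since_reset(void)
	{
		return ktime_get_seconds() - stats_reset_time;
	}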
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index e640223bab3c..6fc34fb20f00 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -288,18 +288,6 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
return 0;
}
-static u64
-bfa_get_log_time(void)
-{
- u64 system_time = 0;
- struct timeval tv;
- do_gettimeofday(&tv);
-
- /* We are interested in seconds only. */
- system_time = tv.tv_sec;
- return system_time;
-}
-
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
@@ -320,7 +308,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
- pl_recp->tv = bfa_get_log_time();
+ pl_recp->tv = ktime_get_real_seconds();
BFA_PL_LOG_REC_INCR(plog->tail);
if (plog->head == plog->tail)
@@ -350,8 +338,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strncpy(lp.log_entry.string_log, log_str,
- BFA_PL_STRING_LOG_SZ - 1);
+ strlcpy(lp.log_entry.string_log, log_str,
+ BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}
@@ -3047,7 +3035,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
struct bfa_fcport_ln_s *ln = &fcport->ln;
- struct timeval tv;
fcport->bfa = bfa;
ln->fcport = fcport;
@@ -3060,8 +3047,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
fcport->stats_dma_ready = BFA_FALSE;
/*
@@ -3295,9 +3281,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
union bfa_fcport_stats_u *ret;
if (complete) {
- struct timeval tv;
- if (fcport->stats_status == BFA_STATUS_OK)
- do_gettimeofday(&tv);
+ time64_t time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
bfa_q_deq(&fcport->stats_pending_q, &qe);
@@ -3312,7 +3296,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
bfa_fcport_fcoe_stats_swap(&ret->fcoe,
&fcport->stats->fcoe);
ret->fcoe.secs_reset =
- tv.tv_sec - fcport->stats_reset_time;
+ time - fcport->stats_reset_time;
}
}
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
@@ -3373,13 +3357,10 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
struct list_head *qe, *qen;
if (complete) {
- struct timeval tv;
-
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
bfa_q_deq(&fcport->statsclr_pending_q, &qe);
cb = (struct bfa_cb_pending_q_s *)qe;
@@ -6148,13 +6129,13 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
/*
* D-port
*/
-#define bfa_dport_result_start(__dport, __mode) do { \
- (__dport)->result.start_time = bfa_get_log_time(); \
- (__dport)->result.status = DPORT_TEST_ST_INPRG; \
- (__dport)->result.mode = (__mode); \
- (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
- (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
- (__dport)->result.lpcnt = (__dport)->lpcnt; \
+#define bfa_dport_result_start(__dport, __mode) do { \
+ (__dport)->result.start_time = ktime_get_real_seconds(); \
+ (__dport)->result.status = DPORT_TEST_ST_INPRG; \
+ (__dport)->result.mode = (__mode); \
+ (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
+ (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
+ (__dport)->result.lpcnt = (__dport)->lpcnt; \
} while (0)
static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
@@ -6588,7 +6569,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
switch (dport->i2hmsg.scn.state) {
case BFI_DPORT_SCN_TESTCOMP:
- dport->result.end_time = bfa_get_log_time();
+ dport->result.end_time = ktime_get_real_seconds();
bfa_trc(dport->bfa, dport->result.end_time);
dport->result.status = msg->info.testcomp.status;
@@ -6635,7 +6616,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
case BFI_DPORT_SCN_SUBTESTSTART:
subtesttype = msg->info.teststart.type;
dport->result.subtest[subtesttype].start_time =
- bfa_get_log_time();
+ ktime_get_real_seconds();
dport->result.subtest[subtesttype].status =
DPORT_TEST_ST_INPRG;
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index ea2278bc78a8..7e8fb6231d49 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -505,7 +505,7 @@ struct bfa_fcport_s {
struct list_head stats_pending_q;
struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
- u32 stats_reset_time; /* stats reset time stamp */
+ time64_t stats_reset_time; /* stats reset time stamp */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cf0466686804..bd7e6a6fc1f1 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -610,13 +610,12 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
- kva_elem->kva = vmalloc(kva_elem->mem_len);
+ kva_elem->kva = vzalloc(kva_elem->mem_len);
if (kva_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
- memset(kva_elem->kva, 0, kva_elem->mem_len);
}
/* Iterate through the DMA meminfo queue */
@@ -981,20 +980,20 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strncpy(driver_info.version, BFAD_DRIVER_VERSION,
- sizeof(driver_info.version) - 1);
+ strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ sizeof(driver_info.version));
if (host_name)
- strncpy(driver_info.host_machine_name, host_name,
- sizeof(driver_info.host_machine_name) - 1);
+ strlcpy(driver_info.host_machine_name, host_name,
+ sizeof(driver_info.host_machine_name));
if (os_name)
- strncpy(driver_info.host_os_name, os_name,
- sizeof(driver_info.host_os_name) - 1);
+ strlcpy(driver_info.host_os_name, os_name,
+ sizeof(driver_info.host_os_name));
if (os_patch)
- strncpy(driver_info.host_os_patch, os_patch,
- sizeof(driver_info.host_os_patch) - 1);
+ strlcpy(driver_info.host_os_patch, os_patch,
+ sizeof(driver_info.host_os_patch));
- strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name) - 1);
+ strlcpy(driver_info.os_device_name, bfad->pci_name,
+ sizeof(driver_info.os_device_name));
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 13db3b7bc873..d4d276c757ea 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -487,7 +487,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
- struct bfad_port_s *port;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
@@ -502,8 +501,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
return 0;
}
- port = im_port->port;
-
vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
@@ -843,7 +840,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2fa195adc7a..3976e787ba64 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strncpy(iocmd->attr.port_symname.symname,
+ strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
- sizeof(port_attr.port_cfg.sym_name.symname));
+ sizeof(iocmd->attr.port_symname.symname));
iocmd->status = BFA_STATUS_OK;
return 0;
@@ -2094,13 +2094,11 @@ bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_profile_s *iocmd =
(struct bfa_bsg_fcpim_profile_s *)cmd;
- struct timeval tv;
unsigned long flags;
- do_gettimeofday(&tv);
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
- iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
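[Editorial note] One subtlety in the bfad_bsg.c hunk above: the size argument to strlcpy() must be the destination size. The pre-patch code passed sizeof() of the source field, which only happens to be safe while both arrays have the same length; the fix switches it to the destination. Sketch of the pitfall, with illustrative sizes:

	#include <linux/string.h>

	struct src_attr { char symname[64]; };
	struct dst_attr { char symname[32]; };

	static void copy_symname(struct dst_attr *dst, const struct src_attr *src)
	{
		/* bound by sizeof(dst->symname); passing sizeof(src->symname)
		 * would overflow the smaller destination */
		strlcpy(dst->symname, src->symname, sizeof(dst->symname));
	}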
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 05f523971348..349cfe7d055e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -81,7 +81,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
@@ -89,8 +89,6 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
fw_debug->debug_buffer,
@@ -125,7 +123,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
@@ -133,8 +131,6 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
fw_debug->debug_buffer,
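[Editorial note] The vmalloc()+memset() pairs in bfad.c and bfad_debugfs.c collapse into vzalloc(), which returns already-zeroed memory. Sketch:

	#include <linux/vmalloc.h>

	static void *alloc_fw_debug_buffer(unsigned long len)
	{
		return vzalloc(len);	/* zeroed memory, or NULL on failure */
	}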
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 06ce4ba2b7bc..af66275570c3 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,16 +141,28 @@ struct bfad_im_s {
} while (0)
/* post fc_host vendor event */
-#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
- do_gettimeofday(&(_entry)->aen_tv); \
- (_entry)->bfad_num = (_drv)->inst_no; \
- (_entry)->seq_num = (_cnt); \
- (_entry)->aen_category = (_cat); \
- (_entry)->aen_type = (_evt); \
- if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
- queue_work((_drv)->im->drv_workq, \
- &(_drv)->im->aen_im_notify_work); \
-} while (0)
+static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
+ struct bfad_s *drv, int cnt,
+ enum bfa_aen_category cat,
+ enum bfa_ioc_aen_event evt)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ /*
+ * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
+ * architectures, or in 2038 if user space interprets it
+ * as 'signed'.
+ */
+ entry->aen_tv_sec = ts.tv_sec;
+ entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ entry->bfad_num = drv->inst_no;
+ entry->seq_num = cnt;
+ entry->aen_category = cat;
+ entry->aen_type = evt;
+ if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
+ queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
+}
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
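[Editorial note] Converting bfad_im_post_vendor_event() from a macro to a static inline buys argument type checking and makes the y2038-safe timestamping explicit: ktime_get_real_ts64() fills a struct timespec64, whose values are then split into the fixed-width AEN fields. A sketch of just the timestamp split; the field types here are illustrative:

	#include <linux/timekeeping.h>
	#include <linux/time64.h>

	static inline void aen_stamp(u32 *sec, u32 *usec)
	{
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		*sec  = ts.tv_sec;		/* truncated into a 32-bit field */
		*usec = ts.tv_nsec / NSEC_PER_USEC;
	}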
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e6b9de7d41ac..65de1d0578a1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1552,7 +1552,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
rc = bnx2fc_shost_config(lport, parent);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
interface->netdev->name);
goto lp_config_err;
}
@@ -1560,7 +1560,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
/* Initialize the libfc library */
rc = bnx2fc_libfc_config(lport);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ printk(KERN_ERR PFX "Couldn't configure libfc\n");
goto shost_err;
}
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 26de61d65a4d..e8ae4d671d23 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,16 +1857,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
goto out;
}
- memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
/*
* Allocate task_ctx which is an array of pointers pointing to
@@ -1895,16 +1894,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
goto out3;
}
- memset(hba->task_ctx[i], 0, PAGE_SIZE);
addr = (u64)hba->task_ctx_dma[i];
task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
task_ctx_bdt->lo = cpu_to_le32((u32)addr);
@@ -2033,28 +2031,23 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] =
- dma_alloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_segments[i], 0,
- BNX2FC_HASH_TBL_CHUNK_SIZE);
}
- hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
pbl = hba->hash_tbl_pbl;
for (i = 0; i < segment_count; ++i) {
@@ -2111,27 +2104,26 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
+ mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl, 0x00, mem_size);
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
addr = (unsigned long) hba->t2_hash_tbl_dma +
((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@@ -2148,16 +2140,14 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->stats_buffer, 0x00, PAGE_SIZE);
return 0;
}
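[Editorial note] Every allocation in this file followed dma_alloc_coherent() with a memset(); dma_zalloc_coherent() does both in one call and keeps the error paths shorter. Sketch:

	#include <linux/dma-mapping.h>

	static void *alloc_zeroed_tbl(struct device *dev, size_t size,
				      dma_addr_t *dma)
	{
		/* coherent DMA memory, already zeroed; NULL on failure */
		return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
	}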
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a8ae1a019eea..e3d1c7c440c8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,56 +672,52 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->sq, 0, tgt->sq_mem_size);
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->cq, 0, tgt->cq_mem_size);
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->rq, 0, tgt->rq_mem_size);
tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -739,44 +735,43 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
- &tgt->xferq_dma, GFP_KERNEL);
+ tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->xferq, 0, tgt->xferq_mem_size);
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
- &tgt->confq_dma, GFP_KERNEL);
+ tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -792,15 +787,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
/* Allocate and map LCQ */
@@ -808,15 +802,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->lcq, 0, tgt->lcq_mem_size);
tgt->conn_db->rq_prod = 0x8000;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e0640e0f259f..8f03a869ac98 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -547,12 +547,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
- if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_wqe->lun[0];
- /* 57710 requires LUN field to be swapped */
- nopout_wqe->lun[0] = nopout_wqe->lun[1];
- nopout_wqe->lun[1] = tmp;
- }
+ /* 57710 requires LUN field to be swapped */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);
nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
@@ -1073,15 +1070,14 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
ep->qp.sq_first_qe = ep->qp.sq_virt;
ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
@@ -1110,14 +1106,13 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
ep->qp.cq_first_qe = ep->qp.cq_virt;
ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
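[Editorial note] The bnx2i hunk above also replaces an open-coded three-line exchange with the kernel's swap() helper, which expands to a temporary of the correct type via typeof(). Sketch:

	#include <linux/kernel.h>	/* swap() */

	static void lun_words_fixup(u32 lun[2], bool chip_needs_swap)
	{
		if (chip_needs_swap)
			swap(lun[0], lun[1]);	/* type-safe exchange */
	}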
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cb1711a5d7a3..ed2dae657964 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1258,7 +1258,7 @@ module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
-MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 96b31e5af91e..20244254325a 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -48,7 +48,6 @@
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
-#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0-ko"
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 931b1d8f9f3e..5f4e0a787bd1 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1216,7 +1216,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Queue mbox cmd, if another mbox cmd is active */
if (mbp->mb_cbfn == NULL) {
rv = -EBUSY;
- csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
hw->pfn, *((uint8_t *)mbp->mb));
goto error_out;
@@ -1244,14 +1244,14 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
rv = owner ? -EBUSY : -ETIMEDOUT;
csio_dbg(hw,
- "Couldnt own Mailbox %x op:0x%x "
+ "Couldn't own Mailbox %x op:0x%x "
"owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb), owner);
goto error_out;
} else {
if (mbm->mcurrent == NULL) {
csio_err(hw,
- "Couldnt own Mailbox %x "
+ "Couldn't own Mailbox %x "
"op:0x%x owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb),
owner);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ce1336414e0a..3f3af5e74a07 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1914,7 +1914,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (task->sc) {
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
} else {
- task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
if (!task->hdr) {
__kfree_skb(tdata->skb);
tdata->skb = NULL;
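[Editorial note] The libcxgbi change swaps GFP_KERNEL for GFP_ATOMIC in the PDU-header allocation. The hunk itself does not show the calling context, but the change only makes sense if alloc_pdu can run where sleeping is forbidden (under a lock or in softirq). Sketch of the trade-off, names illustrative:

	#include <linux/slab.h>

	static void *alloc_pdu_hdr(size_t len)
	{
		/* GFP_ATOMIC never sleeps but can fail under memory
		 * pressure, so the caller must handle NULL (as the
		 * hunk's caller does) */
		return kzalloc(len, GFP_ATOMIC);
	}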
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 9e39866d473b..7ec3f6b55dde 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
new file mode 100644
index 000000000000..339e42b03c49
--- /dev/null
+++ b/drivers/scsi/cxlflash/backend.h
@@ -0,0 +1,41 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+
+struct cxlflash_backend_ops {
+ struct module *module;
+ void __iomem * (*psa_map)(void *);
+ void (*psa_unmap)(void __iomem *);
+ int (*process_element)(void *);
+ int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
+ void (*unmap_afu_irq)(void *, int, void *);
+ int (*start_context)(void *);
+ int (*stop_context)(void *);
+ int (*afu_reset)(void *);
+ void (*set_master)(void *);
+ void * (*get_context)(struct pci_dev *, void *);
+ void * (*dev_context_init)(struct pci_dev *, void *);
+ int (*release_context)(void *);
+ void (*perst_reloads_same_image)(void *, bool);
+ ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
+ int (*allocate_afu_irqs)(void *, int);
+ void (*free_afu_irqs)(void *);
+ void * (*create_afu)(struct pci_dev *);
+ struct file * (*get_fd)(void *, struct file_operations *, int *);
+ void * (*fops_get_context)(struct file *);
+ int (*start_work)(void *, u64);
+ int (*fd_mmap)(struct file *, struct vm_area_struct *);
+ int (*fd_release)(struct inode *, struct file *);
+};
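[Editorial note] backend.h turns every CXL touchpoint into an indirect call through cxlflash_backend_ops, with opaque void * cookies in place of cxl_context/cxl_afu pointers; the rest of the series rewrites callers to dispatch through cfg->ops, presumably so an alternate transport backend can be slotted in later without touching the core driver. A sketch of the dispatch pattern as the callers use it (the wrapper struct stands in for struct cxlflash_cfg):

	#include "backend.h"

	struct host_cfg {		/* stand-in for struct cxlflash_cfg */
		const struct cxlflash_backend_ops *ops;
	};

	static int start_master_context(struct host_cfg *cfg, void *ctx_cookie)
	{
		/* callers never name the CXL API directly any more */
		cfg->ops->set_master(ctx_cookie);
		return cfg->ops->start_context(ctx_cookie);
	}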
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6d95e8e147e0..102fd26ca886 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include "backend.h"
+
extern const struct file_operations cxlflash_cxl_fops;
#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg {
struct afu *afu;
+ const struct cxlflash_backend_ops *ops;
struct pci_dev *dev;
struct pci_device_id *dev_id;
struct Scsi_Host *host;
@@ -129,7 +132,7 @@ struct cxlflash_cfg {
int lr_port;
atomic_t scan_host_needed;
- struct cxl_afu *cxl_afu;
+ void *afu_cookie;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@@ -203,8 +206,7 @@ struct hwq {
* fields after this point
*/
struct afu *afu;
- struct cxl_context *ctx;
- struct cxl_ioctl_start_work work;
+ void *ctx_cookie;
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
new file mode 100644
index 000000000000..db1cadad5c5d
--- /dev/null
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -0,0 +1,168 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <misc/cxl.h>
+
+#include "backend.h"
+
+/*
+ * The following routines map the cxlflash backend operations to existing CXL
+ * kernel API functions and are largely simple shims that provide an abstraction
+ * for converting generic context and AFU cookies into cxl_context or cxl_afu
+ * pointers.
+ */
+
+static void __iomem *cxlflash_psa_map(void *ctx_cookie)
+{
+ return cxl_psa_map(ctx_cookie);
+}
+
+static void cxlflash_psa_unmap(void __iomem *addr)
+{
+ cxl_psa_unmap(addr);
+}
+
+static int cxlflash_process_element(void *ctx_cookie)
+{
+ return cxl_process_element(ctx_cookie);
+}
+
+static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
+ irq_handler_t handler, void *cookie, char *name)
+{
+ return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
+}
+
+static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
+{
+ cxl_unmap_afu_irq(ctx_cookie, num, cookie);
+}
+
+static int cxlflash_start_context(void *ctx_cookie)
+{
+ return cxl_start_context(ctx_cookie, 0, NULL);
+}
+
+static int cxlflash_stop_context(void *ctx_cookie)
+{
+ return cxl_stop_context(ctx_cookie);
+}
+
+static int cxlflash_afu_reset(void *ctx_cookie)
+{
+ return cxl_afu_reset(ctx_cookie);
+}
+
+static void cxlflash_set_master(void *ctx_cookie)
+{
+ cxl_set_master(ctx_cookie);
+}
+
+static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_get_context(dev);
+}
+
+static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_dev_context_init(dev);
+}
+
+static int cxlflash_release_context(void *ctx_cookie)
+{
+ return cxl_release_context(ctx_cookie);
+}
+
+static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
+{
+ cxl_perst_reloads_same_image(afu_cookie, image);
+}
+
+static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
+ void *buf, size_t count)
+{
+ return cxl_read_adapter_vpd(dev, buf, count);
+}
+
+static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
+{
+ return cxl_allocate_afu_irqs(ctx_cookie, num);
+}
+
+static void cxlflash_free_afu_irqs(void *ctx_cookie)
+{
+ cxl_free_afu_irqs(ctx_cookie);
+}
+
+static void *cxlflash_create_afu(struct pci_dev *dev)
+{
+ return cxl_pci_to_afu(dev);
+}
+
+static struct file *cxlflash_get_fd(void *ctx_cookie,
+ struct file_operations *fops, int *fd)
+{
+ return cxl_get_fd(ctx_cookie, fops, fd);
+}
+
+static void *cxlflash_fops_get_context(struct file *file)
+{
+ return cxl_fops_get_context(file);
+}
+
+static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
+{
+ struct cxl_ioctl_start_work work = { 0 };
+
+ work.num_interrupts = irqs;
+ work.flags = CXL_START_WORK_NUM_IRQS;
+
+ return cxl_start_work(ctx_cookie, &work);
+}
+
+static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ return cxl_fd_mmap(file, vm);
+}
+
+static int cxlflash_fd_release(struct inode *inode, struct file *file)
+{
+ return cxl_fd_release(inode, file);
+}
+
+const struct cxlflash_backend_ops cxlflash_cxl_ops = {
+ .module = THIS_MODULE,
+ .psa_map = cxlflash_psa_map,
+ .psa_unmap = cxlflash_psa_unmap,
+ .process_element = cxlflash_process_element,
+ .map_afu_irq = cxlflash_map_afu_irq,
+ .unmap_afu_irq = cxlflash_unmap_afu_irq,
+ .start_context = cxlflash_start_context,
+ .stop_context = cxlflash_stop_context,
+ .afu_reset = cxlflash_afu_reset,
+ .set_master = cxlflash_set_master,
+ .get_context = cxlflash_get_context,
+ .dev_context_init = cxlflash_dev_context_init,
+ .release_context = cxlflash_release_context,
+ .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
+ .read_adapter_vpd = cxlflash_read_adapter_vpd,
+ .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
+ .free_afu_irqs = cxlflash_free_afu_irqs,
+ .create_afu = cxlflash_create_afu,
+ .get_fd = cxlflash_get_fd,
+ .fops_get_context = cxlflash_fops_get_context,
+ .start_work = cxlflash_start_work,
+ .fd_mmap = cxlflash_fd_mmap,
+ .fd_release = cxlflash_fd_release,
+};
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 38b3a9c84fd1..d8fe7ab870b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
cmd->parent = afu;
cmd->hwq_index = hwq_index;
+ cmd->sa.ioasc = 0;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
@@ -710,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}
if (likely(afu->afu_map)) {
- cxl_psa_unmap((void __iomem *)afu->afu_map);
+ cfg->ops->psa_unmap(afu->afu_map);
afu->afu_map = NULL;
}
}
@@ -738,7 +739,7 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
@@ -747,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
case UNMAP_THREE:
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
- cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
case UNMAP_TWO:
- cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
case UNMAP_ONE:
- cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
case FREE_IRQ:
- cxl_free_afu_irqs(hwq->ctx);
+ cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */
case UNDO_NOOP:
/* No action required */
@@ -782,15 +783,15 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
- WARN_ON(cxl_stop_context(hwq->ctx));
+ WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
if (index != PRIMARY_HWQ)
- WARN_ON(cxl_release_context(hwq->ctx));
- hwq->ctx = NULL;
+ WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
+ hwq->ctx_cookie = NULL;
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
@@ -1598,27 +1599,6 @@ out:
}
/**
- * start_context() - starts the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Return: A success or failure value from CXL services.
- */
-static int start_context(struct cxlflash_cfg *cfg, u32 index)
-{
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
-
- rc = cxl_start_context(hwq->ctx,
- hwq->work.work_element_descriptor,
- NULL);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
* read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host.
* @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
@@ -1640,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
- vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
+ vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@@ -1732,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map;
struct hwq *hwq;
+ void *cookie;
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1746,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Copy frequently used fields into hwq */
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
+ cookie = hwq->ctx_cookie;
- hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
+ hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
@@ -1925,13 +1907,13 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
struct hwq *hwq)
{
struct device *dev = &cfg->dev->dev;
- struct cxl_context *ctx = hwq->ctx;
+ void *ctx = hwq->ctx_cookie;
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2;
- rc = cxl_allocate_afu_irqs(ctx, num_irqs);
+ rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
@@ -1939,16 +1921,16 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
+ "SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
level = FREE_IRQ;
goto out;
}
- rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
+ rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
+ "SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
level = UNMAP_ONE;
@@ -1959,8 +1941,8 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
if (!is_primary_hwq)
goto out;
- rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
+ "SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
level = UNMAP_TWO;
@@ -1979,7 +1961,7 @@ out:
*/
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
- struct cxl_context *ctx;
+ void *ctx;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
@@ -1990,23 +1972,23 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
- ctx = cxl_get_context(cfg->dev);
+ ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else
- ctx = cxl_dev_context_init(cfg->dev);
- if (unlikely(!ctx)) {
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+ if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
}
- WARN_ON(hwq->ctx);
- hwq->ctx = ctx;
+ WARN_ON(hwq->ctx_cookie);
+ hwq->ctx_cookie = ctx;
/* Set it up as a master with the CXL */
- cxl_set_master(ctx);
+ cfg->ops->set_master(ctx);
/* Reset AFU when initializing primary context */
if (index == PRIMARY_HWQ) {
- rc = cxl_afu_reset(ctx);
+ rc = cfg->ops->afu_reset(ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: AFU reset failed rc=%d\n",
__func__, rc);
@@ -2020,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
goto err2;
}
- /* This performs the equivalent of the CXL_IOCTL_START_WORK.
- * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
- * element (pe) that is embedded in the context (ctx)
- */
- rc = start_context(cfg, index);
+ /* Finally, activate the context by starting it */
+ rc = cfg->ops->start_context(hwq->ctx_cookie);
if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE;
@@ -2037,9 +2016,9 @@ out:
err2:
term_intr(cfg, level, index);
if (index != PRIMARY_HWQ)
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
err1:
- hwq->ctx = NULL;
+ hwq->ctx_cookie = NULL;
goto out;
}
@@ -2094,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
struct hwq *hwq;
int i;
- cxl_perst_reloads_same_image(cfg->cxl_afu, true);
+ cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
@@ -2108,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Map the entire MMIO space of the AFU using the first context */
hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cxl_psa_map(hwq->ctx);
+ afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
if (!afu->afu_map) {
- dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
+ dev_err(dev, "%s: psa_map failed\n", __func__);
rc = -ENOMEM;
goto err1;
}
@@ -3670,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
+ cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;
/*
@@ -3701,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- cfg->cxl_afu = cxl_pci_to_afu(pdev);
+ cfg->afu_cookie = cfg->ops->create_afu(pdev);
rc = init_pci(cfg);
if (rc) {
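[Editorial note] One behavioural detail in the main.c hunks: the NULL check after context creation becomes IS_ERR_OR_NULL(), since a generic backend may report failure either with NULL or with an ERR_PTR-encoded errno. Sketch of the idiom:

	#include <linux/err.h>

	static int ctx_create_status(void *ctx)
	{
		/* accept both failure conventions from backends */
		if (IS_ERR_OR_NULL(ctx))
			return ctx ? PTR_ERR(ctx) : -ENOMEM;
		return 0;
	}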
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 170fff5aeff6..2fe79df5c73c 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -810,20 +810,22 @@ err:
* init_context() - initializes a previously allocated context
* @ctxi: Previously allocated context
* @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained CXL context reference.
+ * @ctx: Previously obtained context cookie.
* @ctxid: Previously obtained process element associated with CXL context.
* @file: Previously obtained file associated with CXL context.
* @perms: User-specified permissions.
+ * @irqs: User-specified number of interrupts.
*/
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid, struct file *file,
- u32 perms)
+ void *ctx, int ctxid, struct file *file, u32 perms,
+ u64 irqs)
{
struct afu *afu = cfg->afu;
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+ ctxi->irqs = irqs;
ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
@@ -976,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
*/
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct dk_cxlflash_detach detach = { { 0 }, 0 };
@@ -986,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1014,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
- cxl_fd_release(inode, file);
+ cfg->ops->fd_release(inode, file);
out:
dev_dbg(dev, "%s: returning\n", __func__);
return 0;
@@ -1089,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
@@ -1099,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
int rc = 0;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1162,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
*/
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
int rc = 0;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1188,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
- rc = cxl_fd_mmap(file, vma);
+ rc = cfg->ops->fd_mmap(file, vma);
if (likely(!rc)) {
/* Insert ourself in the mmap fault handler path */
ctxi->cxl_mmap_vmops = vma->vm_ops;
@@ -1307,23 +1309,23 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
- struct cxl_ioctl_start_work *work;
struct ctx_info *ctxi = NULL;
struct lun_access *lun_access = NULL;
int rc = 0;
u32 perms;
int ctxid = -1;
+ u64 irqs = attach->num_interrupts;
u64 flags = 0UL;
u64 rctxid = 0UL;
struct file *file = NULL;
- struct cxl_context *ctx = NULL;
+ void *ctx = NULL;
int fd = -1;
- if (attach->num_interrupts > 4) {
+ if (irqs > 4) {
dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, attach->num_interrupts);
+ __func__, irqs);
rc = -EINVAL;
goto out;
}
@@ -1394,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1402,25 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- work = &ctxi->work;
- work->num_interrupts = attach->num_interrupts;
- work->flags = CXL_START_WORK_NUM_IRQS;
-
- rc = cxl_start_work(ctx, work);
+ rc = cfg->ops->start_work(ctx, irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1431,7 +1429,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
/* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms);
+ init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
@@ -1479,8 +1477,8 @@ out:
err:
/* Cleanup CXL context; okay to 'stop' even if it was not started */
if (!IS_ERR_OR_NULL(ctx)) {
- cxl_stop_context(ctx);
- cxl_release_context(ctx);
+ cfg->ops->stop_context(ctx);
+ cfg->ops->release_context(ctx);
ctx = NULL;
}
@@ -1529,10 +1527,10 @@ static int recover_context(struct cxlflash_cfg *cfg,
int fd = -1;
int ctxid = -1;
struct file *file;
- struct cxl_context *ctx;
+ void *ctx;
struct afu *afu = cfg->afu;
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1540,21 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_start_work(ctx, &ctxi->work);
+ rc = cfg->ops->start_work(ctx, ctxi->irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err1;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err2;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1601,9 +1599,9 @@ err3:
fput(file);
put_unused_fd(fd);
err2:
- cxl_stop_context(ctx);
+ cfg->ops->stop_context(ctx);
err1:
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
goto out;
}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 0b5976829913..35c3cbf83fb5 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -96,15 +96,15 @@ struct ctx_info {
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
- struct cxl_ioctl_start_work work;
u64 ctxid;
+ u64 irqs; /* Number of interrupts requested for context */
pid_t pid;
bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
struct kref kref;
- struct cxl_context *ctx;
+ void *ctx;
struct cxlflash_cfg *cfg;
struct list_head luns; /* LUNs attached to this context */
const struct vm_operations_struct *cxl_mmap_vmops;
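Note: the cxlflash hunks above route every former cxl_* call through a per-adapter ops table (cfg->ops) and erase the context type to void *, so the superpipe code no longer hard-codes the CXL transport. A minimal sketch of what such a backend vtable and one binding could look like; the struct and function names here are illustrative assumptions, not the driver's actual declarations:

	#include <linux/fs.h>
	#include <linux/pci.h>
	#include <misc/cxl.h>

	/* Hypothetical transport vtable; the real layout lives in the driver. */
	struct cxlflash_backend_ops {
		void *(*dev_context_init)(struct pci_dev *dev, void *afu_cookie);
		int (*start_work)(void *ctx, u64 irqs);
		int (*stop_context)(void *ctx);
		int (*release_context)(void *ctx);
		int (*process_element)(void *ctx);
		struct file *(*get_fd)(void *ctx, struct file_operations *fops,
				       int *fd);
		void *(*fops_get_context)(struct file *file);
		int (*fd_release)(struct inode *inode, struct file *file);
		int (*fd_mmap)(struct file *file, struct vm_area_struct *vma);
	};

	/* A CXL binding would wrap the existing entry points one-to-one: */
	static void *cxlflash_cxl_dev_context_init(struct pci_dev *dev,
						   void *afu_cookie)
	{
		return cxl_dev_context_init(dev);	/* cookie unused by CXL */
	}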
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fd22dc6ab5d9..022e421c2185 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80
+#define TPGS_SUPPORT_ALL 0xdf
#define RTPG_FMT_MASK 0x70
#define RTPG_FMT_EXT_HDR 0x10
@@ -81,6 +82,7 @@ struct alua_port_group {
int tpgs;
int state;
int pref;
+ int valid_states;
unsigned flags; /* used for optimizing STPG */
unsigned char transition_tmo;
unsigned long expiry;
@@ -243,6 +245,7 @@ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
pg->group_id = group_id;
pg->tpgs = tpgs;
pg->state = SCSI_ACCESS_STATE_OPTIMAL;
+ pg->valid_states = TPGS_SUPPORT_ALL;
if (optimize_stpg)
pg->flags |= ALUA_OPTIMIZE_STPG;
kref_init(&pg->kref);
@@ -516,7 +519,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
- int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
+ int len, k, off, bufflen = ALUA_RTPG_SIZE;
unsigned char *desc, *buff;
unsigned err, retval;
unsigned int tpg_desc_tbl_off;
@@ -541,6 +544,22 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
if (retval) {
+ /*
+ * Some (broken) implementations have a habit of returning
+ * an error during things like firmware update etc.
+ * But if the target only supports the active/optimized state,
+ * there is not much we can do anyway; we could not switch to
+ * another path even if we wanted to.
+ * So ignore any errors to avoid spurious failures during
+ * path failover.
+ */
+ if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: ignoring rtpg result %d\n",
+ ALUA_DH_NAME, retval);
+ kfree(buff);
+ return SCSI_DH_OK;
+ }
if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
@@ -652,7 +671,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_unlock();
}
if (tmp_pg == pg)
- valid_states = desc[1];
+ tmp_pg->valid_states = desc[1];
spin_unlock_irqrestore(&tmp_pg->lock, flags);
}
kref_put(&tmp_pg->kref, release_port_group);
@@ -665,13 +684,13 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
pg->pref ? "preferred" : "non-preferred",
- valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
- valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
- valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
- valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
- valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
- valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
- valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+ pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
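Note: the alua change above caches descriptor byte 1 of the RTPG response (the supported-states byte) in the port group, so a later RTPG failure can be waved through when the target cannot offer any state other than active/optimized. A small sketch of the test, assuming the usual 0x01 mask for the active/optimized bit defined earlier in this file:

	#include <stdbool.h>
	#include <stdint.h>

	#define TPGS_SUPPORT_OPTIMIZED	0x01

	/*
	 * True when no state besides active/optimized is supported:
	 * path switching is impossible, so an RTPG error is harmless.
	 */
	static bool alua_only_active_optimized(uint8_t valid_states)
	{
		return (valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0;
	}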
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 5e3d909cfc53..6d3e1cb4fea6 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -108,24 +108,6 @@ void fnic_debugfs_terminate(void)
}
/*
- * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
- * Or Open fc_trace_enable file for fc_trace
- * @inode: The inode pointer.
- * @file: The file pointer to attach the trace enable/disable flag.
- *
- * Description:
- * This routine opens a debugsfs file trace_enable or fc_trace_enable.
- *
- * Returns:
- * This function returns zero if successful.
- */
-static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/*
* fnic_trace_ctrl_read -
* Read trace_enable ,fc_trace_enable
* or fc_trace_clear debugfs file
@@ -220,7 +202,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
static const struct file_operations fnic_trace_ctrl_fops = {
.owner = THIS_MODULE,
- .open = fnic_trace_ctrl_open,
+ .open = simple_open,
.read = fnic_trace_ctrl_read,
.write = fnic_trace_ctrl_write,
};
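Note: the hand-rolled open above is dropped in favor of the generic simple_open() helper from fs/libfs.c, which performs the same inode->i_private to file->private_data hand-off. Its body, paraphrased as a sketch:

	int simple_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}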
@@ -632,7 +614,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
- getnstimeofday(&stats->stats_timestamps.last_reset_time);
+ ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 999fc7547560..c7bf316d8e83 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -442,15 +442,13 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kmalloc(sizeof(*vlan),
- GFP_ATOMIC);
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
- memset(vlan, 0, sizeof(struct fcoe_vlan));
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);
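Note: the kzalloc() conversion above folds the separate memset() into the allocation; kzalloc(sz, gfp) is shorthand for kmalloc(sz, gfp) plus zeroing on success. Spelled out:

	/* Before: two steps, zeroing done separately after allocation. */
	vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
	if (vlan)
		memset(vlan, 0, sizeof(*vlan));

	/* After: one step; GFP_ATOMIC stays, since a spinlock is held here. */
	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);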
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 242e2ee494a1..8cbd3c9f0b4c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -906,7 +906,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
- "hdr status = %s tag = 0x%x sc = 0x%p"
+ "hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
fnic_fcpio_status_to_str(hdr_status),
id, sc,
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index e007feedbf72..9daa6ada6fa0 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -18,8 +18,8 @@
#define _FNIC_STATS_H_
struct stats_timestamps {
- struct timespec last_reset_time;
- struct timespec last_read_time;
+ struct timespec64 last_reset_time;
+ struct timespec64 last_read_time;
};
struct io_path_stats {
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4826f596cb31..abddde11982b 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -111,7 +111,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
int len = 0;
unsigned long flags;
char str[KSYM_SYMBOL_LEN];
- struct timespec val;
+ struct timespec64 val;
fnic_trace_data_t *tbp;
spin_lock_irqsave(&fnic_trace_lock, flags);
@@ -129,10 +129,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -140,8 +140,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -171,10 +171,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -182,8 +182,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -217,29 +217,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
{
int len = 0;
int buf_size = debug->buf_size;
- struct timespec val1, val2;
+ struct timespec64 val1, val2;
- getnstimeofday(&val1);
+ ktime_get_real_ts64(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Current time : [%ld:%ld]\n"
- "Last stats reset time: [%ld:%ld]\n"
- "Last stats read time: [%ld:%ld]\n"
- "delta since last reset: [%ld:%ld]\n"
- "delta since last read: [%ld:%ld]\n",
- val1.tv_sec, val1.tv_nsec,
- stats->stats_timestamps.last_reset_time.tv_sec,
+ "Current time : [%lld:%ld]\n"
+ "Last stats reset time: [%lld:%09ld]\n"
+ "Last stats read time: [%lld:%ld]\n"
+ "delta since last reset: [%lld:%ld]\n"
+ "delta since last read: [%lld:%ld]\n",
+ (s64)val1.tv_sec, val1.tv_nsec,
+ (s64)stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
- stats->stats_timestamps.last_read_time.tv_sec,
+ (s64)stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
stats->stats_timestamps.last_read_time = val1;
@@ -403,12 +403,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
- jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
- jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+ jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Last ISR time: %llu (%8lu.%8lu)\n"
- "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Last ISR time: %llu (%8llu.%09lu)\n"
+ "Last ACK time: %llu (%8llu.%09lu)\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
@@ -425,9 +425,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
- val1.tv_sec, val1.tv_nsec,
+ (s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
- val2.tv_sec, val2.tv_nsec,
+ (s64)val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
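Note: the fnic conversions above are the standard y2038 migration: struct timespec, getnstimeofday() and jiffies_to_timespec() become struct timespec64, ktime_get_real_ts64() and jiffies_to_timespec64(), tv_sec (64-bit even on 32-bit architectures) is printed via %lld with an explicit cast, and %09 pads nanoseconds to nine digits. A minimal kernel-side sketch of the pattern:

	#include <linux/printk.h>
	#include <linux/timekeeping.h>
	#include <linux/types.h>

	static void print_wallclock(void)
	{
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);	/* 64-bit seconds, safe past 2038 */

		/*
		 * tv_sec is time64_t, so cast for %lld; pad tv_nsec to nine
		 * digits so 1.000000042 does not print as 1.42.
		 */
		pr_info("now: %lld.%09ld\n", (s64)ts.tv_sec, ts.tv_nsec);
	}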
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 83357b0367d8..e7fd2877c19c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -99,12 +99,43 @@ struct hisi_sas_hw_error {
const struct hisi_sas_hw_error *sub;
};
+struct hisi_sas_rst {
+ struct hisi_hba *hisi_hba;
+ struct completion *completion;
+ struct work_struct work;
+ bool done;
+};
+
+#define HISI_SAS_RST_WORK_INIT(r, c) \
+ { .hisi_hba = hisi_hba, \
+ .completion = &c, \
+ .work = __WORK_INITIALIZER(r.work, \
+ hisi_sas_sync_rst_work_handler), \
+ .done = false, \
+ }
+
+#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
+ DECLARE_COMPLETION_ONSTACK(c); \
+ DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
+ struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
+
+enum hisi_sas_bit_err_type {
+ HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
+ HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
+};
+
+enum hisi_sas_phy_event {
+ HISI_PHYE_PHY_UP = 0U,
+ HISI_PHYE_LINK_RESET,
+ HISI_PHYES_NUM,
+};
+
struct hisi_sas_phy {
+ struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
- struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
u64 frame_rcvd_size;
@@ -205,13 +236,16 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*free_device)(struct hisi_hba *hisi_hba,
+ void (*clear_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
+ void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
+ int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data);
int max_command_entries;
int complete_hdr_size;
};
@@ -225,6 +259,7 @@ struct hisi_hba {
struct device *dev;
void __iomem *regs;
+ void __iomem *sgpio_regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
@@ -409,7 +444,8 @@ extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
+extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
+ int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
@@ -425,5 +461,9 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
+extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event);
+extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
#endif
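Note: HISI_SAS_DECLARE_RST_WORK_ON_STACK above packages the common synchronous-work idiom: a work struct and a completion both live on the caller's stack, the work is queued to the driver workqueue, and the caller sleeps until the handler signals the completion (hisi_sas_clear_nexus_ha() further down uses it exactly this way). A generic sketch of the same idiom with hypothetical names:

	#include <linux/completion.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct sync_job {
		struct completion *done;
		struct work_struct work;
		bool ok;
	};

	static void sync_job_fn(struct work_struct *work)
	{
		struct sync_job *job = container_of(work, struct sync_job, work);

		job->ok = true;			/* real work would go here */
		complete(job->done);		/* wake the waiting caller */
	}

	static bool run_sync_job(struct workqueue_struct *wq)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct sync_job job = {
			.done = &done,
			.work = __WORK_INITIALIZER(job.work, sync_job_fn),
		};

		queue_work(wq, &job.work);
		wait_for_completion(&done);	/* stack stays valid until here */
		return job.ok;
	}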
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5f503cb09508..2d4dbed03ee3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -22,10 +22,12 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
+static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
-u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
+u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
- switch (cmd) {
+ switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_RECV:
@@ -77,10 +79,26 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_ZAC_MGMT_OUT:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
default:
+ {
+ if (fis->command == ATA_CMD_SET_MAX) {
+ switch (fis->features) {
+ case ATA_SET_MAX_PASSWD:
+ case ATA_SET_MAX_LOCK:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_SET_MAX_PASSWD_DMA:
+ case ATA_SET_MAX_UNLOCK_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ default:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ }
+ }
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return HISI_SAS_SATA_PROTOCOL_PIO;
}
+ }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
@@ -192,7 +210,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter, slot->n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
if (sas_dev)
@@ -431,7 +450,8 @@ err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
if (n_elem)
- dma_unmap_sg(dev, task->scatter, n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
prep_out:
return rc;
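Note: the two dma_unmap_sg() fixes above follow the DMA-API rule that the nents passed to the unmap must be the original scatterlist entry count given to dma_map_sg(), not the possibly smaller mapped count that dma_map_sg() returned. A sketch of the canonical pairing in the driver's own terms:

	#include <linux/dma-mapping.h>
	#include <scsi/libsas.h>

	static int map_sketch(struct device *dev, struct sas_task *task)
	{
		/* nents in, mapped count out. */
		int n_elem = dma_map_sg(dev, task->scatter, task->num_scatter,
					task->data_dir);
		if (!n_elem)
			return -ENOMEM;
		/* ... hardware consumes the first n_elem entries ... */

		/* Unmap with the ORIGINAL nents, not n_elem. */
		dma_unmap_sg(dev, task->scatter, task->num_scatter,
			     task->data_dir);
		return 0;
	}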
@@ -578,6 +598,9 @@ static int hisi_sas_dev_found(struct domain_device *device)
}
}
+ dev_info(dev, "dev[%d:%x] found\n",
+ sas_dev->device_id, sas_dev->dev_type);
+
return 0;
}
@@ -617,7 +640,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
static void hisi_sas_phyup_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
- container_of(work, struct hisi_sas_phy, phyup_ws);
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -626,10 +649,37 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
+static void hisi_sas_linkreset_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
+}
+
+static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
+ [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
+ [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+};
+
+bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+
+ if (WARN_ON(event >= HISI_PHYES_NUM))
+ return false;
+
+ return queue_work(hisi_hba->wq, &phy->works[event]);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
+
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
phy->hisi_hba = hisi_hba;
phy->port = NULL;
@@ -647,7 +697,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
sas_phy->lldd_phy = phy;
- INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
+ for (i = 0; i < HISI_PHYES_NUM; i++)
+ INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -702,7 +753,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
-static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
+void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
struct hisi_sas_device *sas_dev;
struct domain_device *device;
@@ -719,6 +770,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
hisi_sas_release_task(hisi_hba, device);
}
}
+EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
struct domain_device *device)
@@ -733,17 +785,21 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- dev_info(dev, "found dev[%d:%x] is gone\n",
+ dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
- hisi_sas_internal_task_abort(hisi_hba, device,
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- hisi_sas_dereg_device(hisi_hba, device);
+ hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
- device->lldd_dev = NULL;
- memset(sas_dev, 0, sizeof(*sas_dev));
+ hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ device->lldd_dev = NULL;
+ }
+
+ if (hisi_hba->hw->free_device)
+ hisi_hba->hw->free_device(sas_dev);
sas_dev->dev_type = SAS_PHY_UNUSED;
}
@@ -859,12 +915,13 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- dev_err(dev, "abort tmf: TMF task timeout\n");
+ dev_err(dev, "abort tmf: TMF task timeout and not done\n");
if (slot)
slot->task = NULL;
goto ex_err;
- }
+ } else
+ dev_err(dev, "abort tmf: TMF task timeout\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -985,27 +1042,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
sizeof(ssp_task), tmf);
}
-static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
- struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
+static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
- struct hisi_sas_device *sas_dev;
- struct domain_device *device;
+ u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
int i;
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
- sas_dev = &hisi_hba->devices[i];
- device = sas_dev->sas_device;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
+ struct asd_sas_port *sas_port;
+ struct hisi_sas_port *port;
+ struct hisi_sas_phy *phy = NULL;
+ struct asd_sas_phy *sas_phy;
+
if ((sas_dev->dev_type == SAS_PHY_UNUSED)
- || !device || (device->port != sas_port))
+ || !device || !device->port)
continue;
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
+ sas_port = device->port;
+ port = to_hisi_sas_port(sas_port);
+
+ list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
+ if (state & BIT(sas_phy->id)) {
+ phy = sas_phy->lldd_phy;
+ break;
+ }
+
+ if (phy) {
+ port->id = phy->port_id;
- /* Update linkrate of directly attached device. */
- if (!device->parent)
- device->linkrate = linkrate;
+ /* Update linkrate of directly attached device. */
+ if (!device->parent)
+ device->linkrate = phy->sas_phy.linkrate;
- hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ } else
+ port->id = 0xff;
}
}
@@ -1020,21 +1092,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
bool do_port_check = !!(_sas_port != sas_port);
if (!sas_phy->phy->enabled)
continue;
/* Report PHY state change to libsas */
- if (state & (1 << phy_no)) {
- if (do_port_check && sas_port) {
+ if (state & BIT(phy_no)) {
+ if (do_port_check && sas_port && sas_port->port_dev) {
struct domain_device *dev = sas_port->port_dev;
_sas_port = sas_port;
- port->id = phy->port_id;
- hisi_sas_refresh_port_id(hisi_hba,
- sas_port, sas_phy->linkrate);
if (DEV_IS_EXPANDER(dev->dev_type))
sas_ha->notify_port_event(sas_phy,
@@ -1045,8 +1113,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
hisi_sas_phy_down(hisi_hba, phy_no, 0);
}
-
- drain_workqueue(hisi_hba->shost->work_q);
}
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
@@ -1063,7 +1129,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
return -1;
- dev_dbg(dev, "controller resetting...\n");
+ dev_info(dev, "controller resetting...\n");
old_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1072,6 +1138,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (rc) {
dev_warn(dev, "controller reset failed (%d)\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
goto out;
}
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1083,15 +1150,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
- drain_workqueue(hisi_hba->wq);
- drain_workqueue(shost->work_q);
+ hisi_sas_refresh_port_id(hisi_hba);
+ scsi_unblock_requests(shost);
state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_dbg(dev, "controller reset complete\n");
+ dev_info(dev, "controller reset complete\n");
out:
- scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
return rc;
@@ -1134,6 +1200,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
+ if (rc2 < 0) {
+ dev_err(dev, "abort task: internal abort (%d)\n", rc2);
+ return TMF_RESP_FUNC_FAILED;
+ }
+
/*
* If the TMF finds that the IO is not in the device and also
* the internal abort does not succeed, then it is safe to
@@ -1151,8 +1222,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task: internal abort failed\n");
+ goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
@@ -1163,7 +1238,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
+ if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
+ task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1178,12 +1254,29 @@ out:
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
tmf_task.tmf = TMF_ABORT_TASK_SET;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+
return rc;
}
@@ -1213,20 +1306,25 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- unsigned long flags;
+ struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
if (sas_dev->dev_status != HISI_SAS_DEV_EH)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
- hisi_sas_internal_task_abort(hisi_hba, device,
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
+ if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_task(hisi_hba, device);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1249,8 +1347,10 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then hardreset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- if (rc == TMF_RESP_FUNC_FAILED)
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
phy = sas_get_local_phy(device);
@@ -1266,6 +1366,14 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
+ goto out;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
+
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1283,8 +1391,14 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- return hisi_sas_controller_reset(hisi_hba);
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return TMF_RESP_FUNC_COMPLETE;
+
+ return TMF_RESP_FUNC_FAILED;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1441,8 +1555,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int res;
+ /*
+ * If this interface is not implemented, the hardware either does not
+ * support internal abort or does not need one. In that case, return
+ * TMF_RESP_FUNC_FAILED and let the remaining steps proceed on the
+ * assumption that the internal abort has been executed and has
+ * returned a CQ entry.
+ */
if (!hisi_hba->hw->prep_abort)
- return -EOPNOTSUPP;
+ return TMF_RESP_FUNC_FAILED;
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
@@ -1473,9 +1593,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (slot)
slot->task = NULL;
- dev_err(dev, "internal task abort: timeout.\n");
+ dev_err(dev, "internal task abort: timeout and not done.\n");
+ res = -EIO;
goto exit;
- }
+ } else
+ dev_err(dev, "internal task abort: timeout.\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -1507,6 +1629,22 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
+static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
+{
+}
+
+static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ if (!hisi_hba->hw->write_gpio)
+ return -EOPNOTSUPP;
+
+ return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
+ reg_index, reg_count, write_data);
+}
+
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
phy->phy_attached = 0;
@@ -1561,6 +1699,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
+static struct device_attribute *host_attrs[] = {
+ &dev_attr_phy_event_threshold,
+ NULL,
+};
+
static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -1580,6 +1723,7 @@ static struct scsi_host_template _hisi_sas_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
@@ -1597,6 +1741,8 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
+ .lldd_port_deformed = hisi_sas_port_deformed,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -1657,6 +1803,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
cq->hisi_hba = hisi_hba;
/* Delivery queue structure */
+ spin_lock_init(&dq->lock);
dq->id = i;
dq->hisi_hba = hisi_hba;
@@ -1803,6 +1950,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
+void hisi_sas_sync_rst_work_handler(struct work_struct *work)
+{
+ struct hisi_sas_rst *rst =
+ container_of(work, struct hisi_sas_rst, work);
+
+ if (!hisi_sas_controller_reset(rst->hisi_hba))
+ rst->done = true;
+ complete(rst->completion);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
+
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -1909,6 +2067,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hisi_hba->sgpio_regs))
+ goto err_out;
+ }
+
if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index dc6eca8d6afd..679e76f58a0a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void free_device_v1_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
@@ -1482,7 +1482,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1850,7 +1850,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
.sl_notify = sl_notify_v1_hw,
- .free_device = free_device_v1_hw,
+ .clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
.get_free_slot = get_free_slot_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 5d3467fd728d..4ccb61e2ae5c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -240,7 +240,12 @@
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -952,7 +957,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -963,10 +968,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
sas_dev->completion = &completion;
- /* SoC bug workaround */
- if (dev_is_sata(sas_dev->sas_device))
- clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
-
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
@@ -981,6 +982,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
}
}
+static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
+{
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ /* SoC bug workaround */
+ if (dev_is_sata(sas_dev->sas_device))
+ clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
+}
+
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
int i, reset_val;
@@ -1177,8 +1187,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe);
hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
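Note: the tightened CHL_INT1/CHL_INT2 masks above line up with the new handlers; bits cleared in a mask are the interrupt sources being enabled. Assuming TX ECC sits at bit 15 (its _OFF define is just above the quoted context) alongside RX ECC at 17 and the AXI errors at 19-22, a quick check of which sources the new values expose:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/*
		 * CHL_INT1: 0xffffffff -> 0xff857fff enables bits 15, 17 and
		 * 19-22, i.e. the ECC and AXI error sources handled by the
		 * port_ecc_axi_error[] table further down.
		 */
		assert((~0xff857fffu & 0xffffffffu) == 0x007a8000u);

		/*
		 * CHL_INT2: 0x8ffffbff -> 0x8ffffbfe additionally enables
		 * bit 0, the SL_IDAF_TOUT_CONF identify-timeout source.
		 */
		assert(((~0x8ffffbfeu) & ~(~0x8ffffbffu)) == 0x00000001u);
		return 0;
	}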
@@ -2356,6 +2366,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -2400,6 +2411,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
>> CMPLT_HDR_ERR_PHASE_OFF;
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
/* Analyse error happens on which phase TX or RX */
if (ERR_ON_TX_PHASE(err_phase))
@@ -2407,6 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
else if (ERR_ON_RX_PHASE(err_phase))
slot_err_v2_hw(hisi_hba, task, slot, 2);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -2456,7 +2478,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -2517,7 +2539,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -2687,7 +2709,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (!timer_pending(&hisi_hba->timer))
set_link_timer_quirk(hisi_hba);
}
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -2713,10 +2735,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
@@ -2813,6 +2837,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}
+static const struct hisi_sas_hw_error port_ecc_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF),
+ .msg = "dmac_tx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF),
+ .msg = "dmac_rx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+};
+
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -2829,40 +2880,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
while (irq_msk) {
- if (irq_msk & (1 << phy_no)) {
- u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
-
- if (irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error!\
- (0x%x)",
- dev_name(dev), irq_value1);
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if ((irq_msk & (1 << phy_no)) && irq_value1) {
+ int i;
- if (irq_value2)
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
+ for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_ecc_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
- if (irq_value0) {
- if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
- phy_bcast_v2_hw(phy_no, hisi_hba);
+ if ((irq_msk & (1 << phy_no)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT0, irq_value0
- & (~CHL_INT0_HOTPLUG_TOUT_MSK)
- & (~CHL_INT0_SL_PHY_ENABLE_MSK)
- & (~CHL_INT0_NOT_RDY_MSK));
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
}
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+ }
+
+ if ((irq_msk & (1 << phy_no)) && irq_value0) {
+ if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ phy_bcast_v2_hw(phy_no, hisi_hba);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
}
irq_msk &= ~(1 << phy_no);
phy_no++;
@@ -2906,7 +2972,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_warn(dev, ecc_error->msg, irq_value, val);
+ dev_err(dev, ecc_error->msg, irq_value, val);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
@@ -3015,12 +3081,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
for (; sub->msk || sub->msg; sub++) {
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -3206,7 +3272,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.device_type = SAS_SATA_DEV;
phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
@@ -3392,7 +3458,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
udelay(10);
if (cnt++ > 10) {
- dev_info(dev, "wait axi bus state to idle timeout!\n");
+ dev_err(dev, "wait axi bus state to idle timeout!\n");
return -1;
}
}
@@ -3408,6 +3474,44 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
return 0;
}
+static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct device *dev = hisi_hba->dev;
+ int phy_no, count;
+
+ if (!hisi_hba->sgpio_regs)
+ return -EOPNOTSUPP;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX:
+ count = reg_count * 4;
+ count = min(count, hisi_hba->n_phy);
+
+ for (phy_no = 0; phy_no < count; phy_no++) {
+ /*
+ * GPIO_TX[n] register has the highest numbered drive
+ * of the four in the first byte and the lowest
+ * numbered drive in the fourth byte.
+ * See SFF-8485 Rev. 0.7 Table 24.
+ */
+ void __iomem *reg_addr = hisi_hba->sgpio_regs +
+ reg_index * 4 + phy_no;
+ int data_idx = phy_no + 3 - (phy_no % 4) * 2;
+
+ writeb(write_data[data_idx], reg_addr);
+ }
+
+ break;
+ default:
+ dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
+ reg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
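Note: the data_idx arithmetic above implements the SFF-8485 byte order the comment describes: within each 32-bit GPIO_TX register, the highest-numbered drive of a group of four occupies the first byte and the lowest-numbered the fourth. A standalone check of the mapping, in plain C outside the kernel:

	#include <stdio.h>

	int main(void)
	{
		/* Mirrors: data_idx = phy_no + 3 - (phy_no % 4) * 2; */
		for (int phy_no = 0; phy_no < 8; phy_no++)
			printf("phy %d -> write_data[%d]\n",
			       phy_no, phy_no + 3 - (phy_no % 4) * 2);
		/*
		 * Prints 0->3, 1->2, 2->1, 3->0, 4->7, 5->6, 6->5, 7->4:
		 * each group of four phys takes its bytes in reverse order.
		 */
		return 0;
	}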
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
@@ -3415,6 +3519,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.alloc_dev = alloc_dev_quirk_v2_hw,
.sl_notify = sl_notify_v2_hw,
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+ .clear_itct = clear_itct_v2_hw,
.free_device = free_device_v2_hw,
.prep_smp = prep_smp_v2_hw,
.prep_ssp = prep_ssp_v2_hw,
@@ -3434,6 +3539,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
+ .write_gpio = write_gpio_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 19b1f2ffec17..a1f18689729a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -140,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
@@ -165,6 +166,8 @@
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -204,6 +207,13 @@
#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+/* RAS registers need init */
+#define RAS_BASE (0x6000)
+#define SAS_RAS_INTR0 (RAS_BASE)
+#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
+#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
+#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
+
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
@@ -422,7 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
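Note: the same enable-by-clearing logic applies to the v3 mask above: relative to the old 0x8ffffbff, the new 0xffffbfe (0x0ffffbfe as a 32-bit value) clears bits 0 and 31, exactly the SL_IDAF_TOUT_CONF and STP_LINK_TIMEOUT sources that the extended CHL_INT2 handler below services. Quick check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t newly = (~0x0ffffbfeu) & ~(~0x8ffffbffu);

		assert(newly == 0x80000001u);	/* bit 0 + bit 31 */
		return 0;
	}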
@@ -496,6 +506,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
upper_32_bits(hisi_hba->initial_fis_dma));
+
+ /* RAS registers init */
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -588,7 +602,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v3_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -1033,7 +1047,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
@@ -1138,7 +1152,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct dev_to_host_fis *fis;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
sas_phy->oob_mode = SATA_OOB_MODE;
@@ -1181,7 +1195,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->port_id = port_id;
phy->phy_attached = 1;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -1322,7 +1336,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if (!(irq_value1 & error->irq_msk))
continue;
- dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
error->msg, phy_no, irq_value1);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1331,9 +1345,31 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1, irq_value1);
}
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
+ if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+
+ }
+
+ if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba,
+ phy_no, STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+ }
+
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
+ }
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
@@ -1432,12 +1468,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1542,6 +1578,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -1583,7 +1620,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
/* check for erroneous completion */
if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
+
slot_err_v3_hw(hisi_hba, task, slot);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -1628,7 +1676,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -1653,9 +1701,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
- struct hisi_sas_itct *itct;
struct hisi_sas_complete_v3_hdr *complete_queue;
- u32 rd_point = cq->rd_point, wr_point, dev_id;
+ u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
@@ -1671,38 +1718,11 @@ static void cq_tasklet_v3_hw(unsigned long val)
complete_hdr = &complete_queue[rd_point];
- /* Check for NCQ completion */
- if (complete_hdr->act) {
- u32 act_tmp = complete_hdr->act;
- int ncq_tag_count = ffs(act_tmp);
-
- dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
- CMPLT_HDR_DEV_ID_OFF;
- itct = &hisi_hba->itct[dev_id];
-
- /* The NCQ tags are held in the itct header */
- while (ncq_tag_count) {
- __le64 *ncq_tag = &itct->qw4_15[0];
-
- ncq_tag_count -= 1;
- iptt = (ncq_tag[ncq_tag_count / 5]
- >> (ncq_tag_count % 5) * 12) & 0xfff;
-
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
-
- act_tmp &= ~(1 << ncq_tag_count);
- ncq_tag_count = ffs(act_tmp);
- }
- } else {
- iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
- }
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@@ -1951,7 +1971,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
- .free_device = free_device_v3_hw,
+ .clear_itct = clear_itct_v3_hw,
.sl_notify = sl_notify_v3_hw,
.prep_ssp = prep_ssp_v3_hw,
.prep_smp = prep_smp_v3_hw,
@@ -2157,21 +2177,243 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
scsi_host_put(shost);
}
+static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
+ { .irq_msk = BIT(19), .msg = "HILINK_INT" },
+ { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
+ { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
+ { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
+ { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
+};
+
+static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
+ { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
+ { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
+ { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
+ { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
+ { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
+ { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
+ { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
+ { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
+ { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
+ { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
+ { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
+ { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
+ { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
+ { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
+};
+
+static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *ras_error;
+ bool need_reset = false;
+ u32 irq_value;
+ int i;
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
+ ras_error = &sas_ras_intr0_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
+ ras_error = &sas_ras_intr1_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
+
+ return need_reset;
+}
+
+static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+
+ dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (process_non_fatal_error_v3_hw(hisi_hba))
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+
+ dev_info(dev, "PCI error: slot reset callback!!\n");
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
enum {
/* instances of the controller */
hip08,
};
+static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 device_state, status;
+ int rc;
+ u32 reg_val;
+ unsigned long flags;
+
+ if (!pdev->pm_cap) {
+ dev_err(dev, "PCI PM not supported\n");
+ return -ENODEV;
+ }
+
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_block_requests(shost);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ flush_workqueue(hisi_hba->wq);
+ /* disable DQ/PHY/bus */
+ interrupt_disable_v3_hw(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
+
+ hisi_sas_stop_phys(hisi_hba);
+
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= 0x1;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
+
+ /* wait until bus idle */
+ rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
+ AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+ if (rc) {
+ dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ return rc;
+ }
+
+ hisi_sas_init_mem(hisi_hba);
+
+ device_state = pci_choose_state(pdev, state);
+ dev_warn(dev, "entering operating state [D%d]\n",
+ device_state);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_tasks(hisi_hba);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ sas_suspend_ha(sha);
+ return 0;
+}
+
+static int hisi_sas_v3_resume(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct device *dev = hisi_hba->dev;
+ int rc;
+ u32 device_state = pdev->current_state;
+
+ dev_warn(dev, "resuming from operating state [D%d]\n",
+ device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc)
+ dev_err(dev, "enable device failed during resume (%d)\n", rc);
+
+ pci_set_master(pdev);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+ sas_prep_resume_ha(sha);
+ init_reg_v3_hw(hisi_hba);
+ hisi_hba->hw->phys_init(hisi_hba);
+ sas_resume_ha(sha);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+
+ return 0;
+}
+
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
};
+static const struct pci_error_handlers hisi_sas_err_handler = {
+ .error_detected = hisi_sas_error_detected_v3_hw,
+ .mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
+ .slot_reset = hisi_sas_slot_reset_v3_hw,
+};
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
+ .suspend = hisi_sas_v3_suspend,
+ .resume = hisi_sas_v3_resume,
+ .err_handler = &hisi_sas_err_handler,
};
module_pci_driver(sas_v3_pci_driver);
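The driver above wires legacy PCI power-management hooks (.suspend/.resume) and a struct pci_error_handlers into the same struct pci_driver. Below is a minimal, self-contained sketch of that registration pattern; every name and the device ID are placeholders, not the hisi_sas code:

#include <linux/module.h>
#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

/* Legacy PM hook: save config space, then drop to the chosen D-state. */
static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int demo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

/* AER callback: report whether the channel looks recoverable. */
static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected = demo_error_detected,
};

static const struct pci_device_id demo_ids[] = {
	{ PCI_VDEVICE(HUAWEI, 0xffff) },	/* placeholder device ID */
	{ }
};

static struct pci_driver demo_pci_driver = {
	.name		= "demo",
	.id_table	= demo_ids,
	.probe		= demo_probe,
	.remove		= demo_remove,
	.suspend	= demo_suspend,
	.resume		= demo_resume,
	.err_handler	= &demo_err_handler,
};
module_pci_driver(demo_pci_driver);
MODULE_LICENSE("GPL");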
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fe3a0da3ec97..57bf43e34863 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
+ /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ rcu_barrier();
+
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->work_q)
destroy_workqueue(shost->work_q);
+ destroy_rcu_head(&shost->rcu);
+
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_rcu_head(&shost->rcu);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
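The hosts.c hunks pair init_rcu_head() at allocation with rcu_barrier() plus destroy_rcu_head() at release, so any callback queued via call_rcu(&shost->rcu, ...) finishes before the host memory disappears. A reduced sketch of that lifecycle on a hypothetical object:

#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_host {
	struct rcu_head rcu;
	int id;
};

static void demo_rcu_cb(struct rcu_head *head)
{
	struct demo_host *h = container_of(head, struct demo_host, rcu);

	pr_info("deferred work for host %d done\n", h->id);
}

static struct demo_host *demo_host_alloc(void)
{
	struct demo_host *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (h)
		init_rcu_head(&h->rcu);	/* pairs with destroy_rcu_head() */
	return h;
}

static void demo_host_release(struct demo_host *h)
{
	/* Wait out anything queued via call_rcu(&h->rcu, demo_rcu_cb). */
	rcu_barrier();
	destroy_rcu_head(&h->rcu);
	kfree(h);
}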
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 287e5eb0723f..87b260e403ec 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3518,7 +3518,7 @@ out:
if (rc != IO_OK)
hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
- "Error, could not get enclosure information\n");
+ "Error, could not get enclosure information");
}
static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
@@ -4619,21 +4619,13 @@ sglist_finished:
return 0;
}
-#define BUFLEN 128
static inline void warn_zero_length_transfer(struct ctlr_info *h,
u8 *cdb, int cdb_len,
const char *func)
{
- char buf[BUFLEN];
- int outlen;
- int i;
-
- outlen = scnprintf(buf, BUFLEN,
- "%s: Blocking zero-length request: CDB:", func);
- for (i = 0; i < cdb_len; i++)
- outlen += scnprintf(buf+outlen, BUFLEN - outlen,
- "%02hhx", cdb[i]);
- dev_warn(&h->pdev->dev, "%s\n", buf);
+ dev_warn(&h->pdev->dev,
+ "%s: Blocking zero-length request: CDB:%*phN\n",
+ func, cdb_len, cdb);
}
#define IO_ACCEL_INELIGIBLE 1
@@ -8223,8 +8215,6 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
if (!device)
continue;
- if (!device->scsi3addr)
- continue;
if (!hpsa_vpd_page_supported(h, device->scsi3addr,
HPSA_VPD_LV_IOACCEL_STATUS))
continue;
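The warn_zero_length_transfer() rewrite above replaces a hand-rolled hex loop with the kernel's %*ph printk extension, which dumps up to 64 bytes of a buffer in one format specifier. A short sketch of the idiom; the helper name is illustrative:

#include <linux/device.h>
#include <linux/types.h>

static void demo_dump_cdb(struct device *dev, const u8 *cdb, int cdb_len)
{
	/*
	 * %*phN prints the bytes back to back; %*ph adds spaces, %*phC
	 * colons and %*phD dashes. The field width is the byte count.
	 */
	dev_warn(dev, "Blocking zero-length request: CDB:%*phN\n",
		 cdb_len, cdb);
}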
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 0d2f7eb3acb6..b1b1d3a3b173 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -181,7 +181,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
break;
default:
break;
- };
+ }
}
/**
@@ -220,7 +220,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
default:
break;
- };
+ }
}
#else
@@ -464,7 +464,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
default:
vhost->state = state;
break;
- };
+ }
return rc;
}
@@ -500,7 +500,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
default:
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
@@ -515,7 +515,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -526,7 +526,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
}
/**
@@ -1601,7 +1601,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
case IBMVFC_ACTIVE:
result = 0;
break;
- };
+ }
return result;
}
@@ -1856,7 +1856,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
break;
default:
return -ENOTSUPP;
- };
+ }
if (port_id == -1)
return -EINVAL;
@@ -2661,7 +2661,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
- };
+ }
break;
case IBMVFC_AE_LINK_UP:
@@ -2715,7 +2715,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
- };
+ }
}
/**
@@ -3351,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
rsp->status, rsp->error, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3451,7 +3451,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3522,7 +3522,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
- };
+ }
if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
@@ -3626,7 +3626,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3838,7 +3838,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -4236,7 +4236,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
- };
+ }
return 1;
}
@@ -4464,7 +4464,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
default:
break;
- };
+ }
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2799a6b08f73..c3a76af9f5fa 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -122,7 +122,7 @@ static bool connection_broken(struct scsi_info *vscsi)
cpu_to_be64(buffer[MSG_HI]),
cpu_to_be64(buffer[MSG_LOW]));
- pr_debug("connection_broken: rc %ld\n", h_return_code);
+ dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
if (h_return_code == H_CLOSED)
rc = true;
@@ -210,7 +210,7 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
}
} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
- pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
+ dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
return rc;
}
@@ -291,9 +291,9 @@ static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
ibmvscsis_delete_client_info(vscsi, false);
}
- pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
return rc;
}
@@ -428,8 +428,8 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->flags |= DISCONNECT_SCHEDULED;
vscsi->flags &= ~SCHEDULE_DISCONNECT;
- pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/*
* check which state we are in and see if we
@@ -540,13 +540,14 @@ static void ibmvscsis_disconnect(struct work_struct *work)
}
if (wait_idle) {
- pr_debug("disconnect start wait, active %d, sched %d\n",
- (int)list_empty(&vscsi->active_q),
- (int)list_empty(&vscsi->schedule_q));
+ dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
+ (int)list_empty(&vscsi->active_q),
+ (int)list_empty(&vscsi->schedule_q));
if (!list_empty(&vscsi->active_q) ||
!list_empty(&vscsi->schedule_q)) {
vscsi->flags |= WAIT_FOR_IDLE;
- pr_debug("disconnect flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
+ vscsi->flags);
/*
* This routine cannot be called with the interrupt
* lock held.
@@ -555,7 +556,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
wait_for_completion(&vscsi->wait_idle);
spin_lock_bh(&vscsi->intr_lock);
}
- pr_debug("disconnect stop wait\n");
+ dev_dbg(&vscsi->dev, "disconnect stop wait\n");
ibmvscsis_adapter_idle(vscsi);
}
@@ -597,8 +598,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
vscsi->flags |= flag_bits;
- pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
- new_state, flag_bits, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
+ new_state, flag_bits, vscsi->flags, vscsi->state);
if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
vscsi->flags |= SCHEDULE_DISCONNECT;
@@ -648,8 +649,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
}
- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
- vscsi->flags, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+ vscsi->flags, vscsi->new_state);
}
/**
@@ -724,7 +725,8 @@ static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
break;
case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
rc = 0;
break;
}
@@ -768,7 +770,7 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
long rc = ADAPT_SUCCESS;
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+ dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
@@ -776,10 +778,10 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (rc == H_SUCCESS) {
vscsi->client_data.partition_number =
be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
} else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
rc = ADAPT_SUCCESS;
}
@@ -813,7 +815,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
if (rc == H_SUCCESS)
vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
else if (rc != H_NOT_FOUND)
- pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
+ dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
+ rc);
vscsi->flags &= PRESERVE_FLAG_FIELDS;
vscsi->rsp_q_timer.timer_pops = 0;
@@ -822,8 +825,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
rc = vio_enable_interrupts(vscsi->dma_dev);
if (rc) {
- pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
return rc;
}
@@ -883,7 +886,7 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
int bytes;
long rc = ADAPT_SUCCESS;
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
/* don't reset, the client did it for us */
if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
@@ -906,7 +909,8 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
}
if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
+ rc);
vscsi->state = ERR_DISCONNECTED;
vscsi->flags |= RESPONSE_Q_DOWN;
@@ -985,14 +989,15 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
/* See if there is a Resume event in the queue */
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
- pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
- vscsi->flags, vscsi->state, (int)crq->valid);
+ dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
+ vscsi->flags, vscsi->state, (int)crq->valid);
if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
0, 0);
if (rc) {
- pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
+ dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
+ rc);
rc = 0;
}
} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
@@ -1012,7 +1017,7 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
(crq->format != RESUME_FROM_SUSP)))
- pr_err("Invalid element in CRQ after Prepare for Suspend");
+ dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
}
vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
@@ -1036,8 +1041,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
{
long rc = ADAPT_SUCCESS;
- pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
- (int)crq->format, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
+ (int)crq->format, vscsi->flags, vscsi->state);
switch (crq->format) {
case MIGRATED:
@@ -1073,14 +1078,14 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
!list_empty(&vscsi->schedule_q) ||
!list_empty(&vscsi->waiting_rsp) ||
!list_empty(&vscsi->active_q)) {
- pr_debug("debit %d, sched %d, wait %d, active %d\n",
- vscsi->debit,
- (int)list_empty(&vscsi->schedule_q),
- (int)list_empty(&vscsi->waiting_rsp),
- (int)list_empty(&vscsi->active_q));
- pr_warn("connection lost with outstanding work\n");
+ dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
+ vscsi->debit,
+ (int)list_empty(&vscsi->schedule_q),
+ (int)list_empty(&vscsi->waiting_rsp),
+ (int)list_empty(&vscsi->active_q));
+ dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
} else {
- pr_debug("trans_event: SRP Processing, but no outstanding work\n");
+ dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
}
ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
@@ -1097,8 +1102,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
break;
case PREPARE_FOR_SUSPEND:
- pr_debug("Prep for Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
+ (int)crq->status);
switch (vscsi->state) {
case ERR_DISCONNECTED:
case WAIT_CONNECTION:
@@ -1119,15 +1124,15 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
- vscsi->state);
+ dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
+ vscsi->state);
break;
}
break;
case RESUME_FROM_SUSP:
- pr_debug("Resume from Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
+ (int)crq->status);
if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
} else {
@@ -1152,8 +1157,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
rc = vscsi->flags & SCHEDULE_DISCONNECT;
- pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
- vscsi->flags, vscsi->state, rc);
+ dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
return rc;
}
@@ -1175,8 +1180,8 @@ static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
bool ack = true;
volatile u8 valid;
- pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
rc = vscsi->flags & SCHEDULE_DISCONNECT;
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
@@ -1204,7 +1209,7 @@ poll_work:
* if a transport event has occurred leave
* everything but transport events on the queue
*/
- pr_debug("poll_cmd_q, ignoring\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
/*
* need to decrement the queue index so we can
@@ -1233,7 +1238,7 @@ poll_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("poll_cmd_q, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
@@ -1241,7 +1246,7 @@ poll_work:
goto poll_work;
}
- pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
}
/**
@@ -1258,9 +1263,9 @@ static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
struct ibmvscsis_cmd *cmd, *nxt;
- pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
- (int)list_empty(&vscsi->waiting_rsp),
- vscsi->rsp_q_timer.started);
+ dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
+ (int)list_empty(&vscsi->waiting_rsp),
+ vscsi->rsp_q_timer.started);
list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
list_del(&cmd->list);
@@ -1317,8 +1322,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
int free_qs = false;
long rc = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/* Only need to free qs if we're disconnecting from client */
if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
@@ -1336,7 +1341,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
break;
case ERR_DISCONNECT_RECONNECT:
ibmvscsis_reset_queue(vscsi);
- pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
+ vscsi->flags);
break;
case ERR_DISCONNECT:
@@ -1347,8 +1353,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->state = ERR_DISCONNECTED;
else
vscsi->state = WAIT_ENABLED;
- pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
case WAIT_IDLE:
@@ -1370,15 +1376,15 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->flags &= ~DISCONNECT_SCHEDULED;
}
- pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
ibmvscsis_poll_cmd_q(vscsi);
break;
case ERR_DISCONNECTED:
vscsi->flags &= ~DISCONNECT_SCHEDULED;
- pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
default:
@@ -1419,13 +1425,13 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->phyp_acr_state = 0;
vscsi->phyp_acr_flags = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
- pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->new_state);
}
/**
@@ -1464,8 +1470,8 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
cmd->init_time = mftb();
iue->remote_token = crq->IU_data_ptr;
iue->iu_len = len;
- pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
- be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
+ dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
+ be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
break;
case H_PERMISSION:
if (connection_broken(vscsi))
@@ -1536,10 +1542,10 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
}
- pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
- rc);
- pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
- be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
+ dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
+ rc);
+ dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
+ be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
flag_bits);
goto free_dma;
@@ -1595,7 +1601,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
free_dma:
dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
- pr_debug("Leaving adapter_info, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
return rc;
}
@@ -1629,7 +1635,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
*/
min_len = offsetof(struct capabilities, migration);
if ((olen < min_len) || (olen > PAGE_SIZE)) {
- pr_warn("cap_mad: invalid len %d\n", olen);
+ dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
return 0;
}
@@ -1654,9 +1660,9 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
common = (struct mad_capability_common *)&cap->migration;
while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
- pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
- len, be32_to_cpu(common->cap_type),
- be16_to_cpu(common->length));
+ dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
+ len, be32_to_cpu(common->cap_type),
+ be16_to_cpu(common->length));
cap_len = be16_to_cpu(common->length);
if (cap_len > len) {
@@ -1673,7 +1679,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
switch (common->cap_type) {
default:
- pr_debug("cap_mad: unsupported capability\n");
+ dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
common->server_support = 0;
flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
cap->flags &= ~flag;
@@ -1693,8 +1699,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
be64_to_cpu(mad->buffer));
if (rc != H_SUCCESS) {
- pr_debug("cap_mad: failed to copy to client, rc %ld\n",
- rc);
+ dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
@@ -1702,8 +1708,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
CLIENT_FAILED);
}
- pr_warn("cap_mad: error copying data to client, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
+ rc);
ibmvscsis_post_disconnect(vscsi,
ERR_DISCONNECT_RECONNECT,
flag_bits);
@@ -1712,8 +1718,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
- pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
- rc, vscsi->client_cap);
+ dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
+ rc, vscsi->client_cap);
return rc;
}
@@ -1749,7 +1755,7 @@ static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
vscsi->fast_fail = true;
mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
} else {
- pr_warn("fast fail mad sent after login\n");
+ dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
}
break;
@@ -1809,9 +1815,9 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
*/
if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
(vscsi->state == SRP_PROCESSING)) {
- pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
- vscsi->flags, (int)vscsi->rsp_q_timer.started,
- vscsi->rsp_q_timer.timer_pops);
+ dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
+ vscsi->flags, (int)vscsi->rsp_q_timer.started,
+ vscsi->rsp_q_timer.timer_pops);
/*
* Check if the timer is running; if it
@@ -1947,8 +1953,9 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
- cmd, be64_to_cpu(cmd->rsp.tag), rc);
+ dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag),
+ rc);
/* if all ok free up the command
* element resources
@@ -2003,7 +2010,8 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
list_add_tail(&cmd->list, &vscsi->waiting_rsp);
ibmvscsis_send_messages(vscsi);
} else {
- pr_debug("Error sending mad response, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
@@ -2039,8 +2047,8 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
* expecting a response.
*/
case WAIT_CONNECTION:
- pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
- vscsi->flags);
+ dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
+ vscsi->flags);
return ADAPT_SUCCESS;
case SRP_PROCESSING:
@@ -2075,12 +2083,12 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (!rc) {
mad = (struct mad_common *)&vio_iu(iue)->mad;
- pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+ dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
rc = ibmvscsis_process_mad(vscsi, iue);
- pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
- rc);
+ dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
+ be16_to_cpu(mad->status), rc);
if (!rc)
ibmvscsis_send_mad_resp(vscsi, cmd, crq);
@@ -2088,7 +2096,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving mad, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
return rc;
}
@@ -2211,16 +2219,17 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
char *name = tport->tport_name;
struct ibmvscsis_nexus *nexus;
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
int rc;
if (tport->ibmv_nexus) {
- pr_debug("tport->ibmv_nexus already exists\n");
+ dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
return 0;
}
nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
if (!nexus) {
- pr_err("Unable to allocate struct ibmvscsis_nexus\n");
+ dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
return -ENOMEM;
}
@@ -2316,7 +2325,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
cmd->rsp.format = VIOSRP_SRP_FORMAT;
cmd->rsp.tag = req->tag;
- pr_debug("srp_login: reason 0x%x\n", reason);
+ dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
if (reason)
rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
@@ -2333,7 +2342,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving srp_login, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
return rc;
}
@@ -2415,8 +2424,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
case SRP_TSK_MGMT:
tsk = &vio_iu(iue)->srp.tsk_mgmt;
- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
- tsk->tag);
+ dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
+ tsk->tag, tsk->tag);
cmd->rsp.tag = tsk->tag;
vscsi->debit += 1;
cmd->type = TASK_MANAGEMENT;
@@ -2425,8 +2434,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
break;
case SRP_CMD:
- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
- srp->tag);
+ dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
+ srp->tag, srp->tag);
cmd->rsp.tag = srp->tag;
vscsi->debit += 1;
cmd->type = SCSI_CDB;
@@ -2603,7 +2612,7 @@ static int read_dma_window(struct scsi_info *vscsi)
"ibm,my-dma-window",
NULL);
if (!dma_window) {
- pr_err("Couldn't find ibm,my-dma-window property\n");
+ dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
return -1;
}
@@ -2613,7 +2622,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-address-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2622,7 +2631,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-size-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2808,8 +2817,8 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
srp_tsk->lun.scsi_lun[0] &= 0x3f;
- pr_debug("calling submit_tmr, func %d\n",
- srp_tsk->tsk_mgmt_func);
+ dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
+ srp_tsk->tsk_mgmt_func);
rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
scsilun_to_int(&srp_tsk->lun), srp_tsk,
tcm_type, GFP_KERNEL, tag_to_abort, 0);
@@ -3113,8 +3122,8 @@ static long srp_build_response(struct scsi_info *vscsi,
if (cmd->type == SCSI_CDB) {
rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
if (rsp->status) {
- pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
- (int)rsp->status);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
+ cmd, (int)rsp->status);
ibmvscsis_determine_resid(se_cmd, rsp);
if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
rsp->sense_data_len =
@@ -3127,7 +3136,8 @@ static long srp_build_response(struct scsi_info *vscsi,
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else if (cmd->flags & CMD_FAST_FAIL) {
- pr_debug("build_resp: cmd %p, fast fail\n", cmd);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
+ cmd);
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else {
@@ -3340,7 +3350,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
spin_lock_bh(&vscsi->intr_lock);
- pr_debug("got interrupt\n");
+ dev_dbg(&vscsi->dev, "got interrupt\n");
/*
* if we are in a path where we are waiting for all pending commands
@@ -3350,8 +3360,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
- pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
return;
}
@@ -3414,20 +3424,20 @@ cmd_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("handle_crq, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
if (valid)
goto cmd_work;
} else {
- pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
}
- pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
- (int)list_empty(&vscsi->schedule_q), vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+ (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+ vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
}
@@ -3443,7 +3453,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) {
rc = -ENOMEM;
- pr_err("probe: allocation of adapter failed\n");
+ dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
return rc;
}
@@ -3456,14 +3466,14 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
dev_name(&vdev->dev));
- pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+ dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
rc = read_dma_window(vscsi);
if (rc)
goto free_adapter;
- pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
- vscsi->dds.window[LOCAL].liobn,
- vscsi->dds.window[REMOTE].liobn);
+ dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
+ vscsi->dds.window[LOCAL].liobn,
+ vscsi->dds.window[REMOTE].liobn);
strcpy(vscsi->eye, "VSCSI ");
strncat(vscsi->eye, vdev->name, MAX_EYE);
@@ -3541,8 +3551,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
* client can connect" and the client isn't activated yet.
* We'll make the call again when the client sends an init msg.
*/
- pr_debug("probe hrc %ld, client partition num %d\n",
- hrc, vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
+ hrc, vscsi->client_data.partition_number);
tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
(unsigned long)vscsi);
@@ -3602,7 +3612,7 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
{
struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
- pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+ dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
spin_lock_bh(&vscsi->intr_lock);
ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
@@ -3766,14 +3776,16 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
* attempt an srp_transfer_data.
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
- pr_err("write_pending failed since: %d\n", vscsi->flags);
+ dev_err(&vscsi->dev, "write_pending failed since: %d\n",
+ vscsi->flags);
return -EIO;
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
1, 1);
if (rc) {
- pr_err("srp_transfer_data() failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
return -EIO;
}
/*
@@ -3811,7 +3823,7 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
1);
if (rc) {
- pr_err("srp_transfer_data failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
sd = se_cmd->sense_buffer;
se_cmd->scsi_sense_length = 18;
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
@@ -3834,7 +3846,7 @@ static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
struct scsi_info *vscsi = cmd->adapter;
uint len;
- pr_debug("queue_status %p\n", se_cmd);
+ dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
srp_build_response(vscsi, cmd, &len);
cmd->rsp.format = SRP_FORMAT;
@@ -3854,8 +3866,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
uint len;
- pr_debug("queue_tm_rsp %p, status %d\n",
- se_cmd, (int)se_cmd->se_tmr_req->response);
+ dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
+ se_cmd, (int)se_cmd->se_tmr_req->response);
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
@@ -3877,8 +3889,12 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
- pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
- se_cmd, se_cmd->tag);
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
+ se_cmd, se_cmd->tag);
}
static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
@@ -3886,12 +3902,14 @@ static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
const char *name)
{
struct ibmvscsis_tport *tport;
+ struct scsi_info *vscsi;
tport = ibmvscsis_lookup_port(name);
if (tport) {
+ vscsi = container_of(tport, struct scsi_info, tport);
tport->tport_proto_id = SCSI_PROTOCOL_SRP;
- pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
- name, tport, tport->tport_proto_id);
+ dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
+ name, tport, tport->tport_proto_id);
return &tport->tport_wwn;
}
@@ -3903,9 +3921,10 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
struct ibmvscsis_tport *tport = container_of(wwn,
struct ibmvscsis_tport,
tport_wwn);
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
- pr_debug("drop_tport(%s)\n",
- config_item_name(&tport->tport_wwn.wwn_group.cg_item));
+ dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
+ config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
@@ -3990,12 +4009,12 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
rc = kstrtoul(page, 0, &tmp);
if (rc < 0) {
- pr_err("Unable to extract srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- pr_err("Illegal value for srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
return -EINVAL;
}
@@ -4004,8 +4023,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
- pr_err("enable_change_state failed, rc %ld state %d\n",
- lrc, vscsi->state);
+ dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
spin_lock_bh(&vscsi->intr_lock);
@@ -4015,7 +4034,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
+ dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
+ vscsi->state);
return count;
}
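Every hunk in this ibmvscsi_tgt conversion swaps a pr_*() call for its dev_*() counterpart, so each message carries the driver and device name and can be filtered per callsite with dynamic debug. A side-by-side sketch (the helper is hypothetical):

#include <linux/device.h>
#include <linux/printk.h>

static void demo_log_state(struct device *dev, u32 flags, u16 state)
{
	/* Bare text in the log, no device context. */
	pr_debug("disconnect: flags 0x%x, state 0x%hx\n", flags, state);

	/*
	 * Prefixed "driver devname: ..." and controllable through
	 * /sys/kernel/debug/dynamic_debug/control.
	 */
	dev_dbg(dev, "disconnect: flags 0x%x, state 0x%hx\n", flags, state);
}

With CONFIG_DYNAMIC_DEBUG, something like echo 'file ibmvscsi_tgt.c +p' > /sys/kernel/debug/dynamic_debug/control then enables only this file's debug output.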
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cc0187965eee..e07dd990e585 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9653,8 +9653,8 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (i == 0) {
entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
ioa_cfg->hrrq[i].min_cmd_id = 0;
- ioa_cfg->hrrq[i].max_cmd_id =
- (entries_each_hrrq - 1);
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
} else {
entries_each_hrrq =
IPR_NUM_BASE_CMD_BLKS/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4d934d6c3e13..6198559abbd8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -307,6 +307,7 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
/**
* iscsi_sw_tcp_xmit - TCP transmit
+ * @conn: iscsi connection
**/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
@@ -357,6 +358,7 @@ error:
/**
* iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9c50d2d9f27c..15a2fef51e38 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * Fail cmds during shutdown if the session state is
+ * bad, allowing completion to happen.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1979,6 +1988,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, also bad in this case.
+ * Instead, handle the cmd, allow completion to happen, and let
+ * the upper layer deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2082,7 +2104,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
@@ -2722,8 +2744,10 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
* @iscsit: iscsi transport template
* @shost: scsi host
* @cmds_max: session can queue
+ * @dd_size: private driver data size, added to session allocation size
* @cmd_task_size: LLD task private data size
* @initial_cmdsn: initial CmdSN
+ * @id: target ID to add to this session
*
* This can be used by software iscsi_transports that allocate
* a session per scsi host.
@@ -2951,7 +2975,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_setup);
/**
* iscsi_conn_teardown - teardown iscsi connection
- * cls_conn: iscsi class connection
+ * @cls_conn: iscsi class connection
*
* TODO: we may need to make this into a two step process
* like scsi-mls remove + put host
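Both libiscsi hunks above test the global system_state so commands fail fast once shutdown has started rather than waiting on session recovery that will never finish. The check reduces to this idiom, shown here as a standalone sketch:

#include <linux/kernel.h>	/* system_state, SYSTEM_RUNNING */

static bool demo_shutting_down(void)
{
	/*
	 * system_state leaves SYSTEM_RUNNING once halt, power-off or
	 * restart begins; at that point recovery cannot complete, so the
	 * patch fails the command with DID_NO_CONNECT instead of retrying.
	 */
	return unlikely(system_state != SYSTEM_RUNNING);
}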
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 63a1d69ff515..369ef8f23b24 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -798,6 +798,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/**
* iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iSCSI TCP connection
+ * @segment: the buffer segment being processed
*
* This is the callback invoked when the PDU header has
* been received. If the header is followed by additional
@@ -876,9 +878,10 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
* @conn: iscsi connection
* @skb: network buffer with header and/or data segment
* @offset: offset in skb
- * @offload: bool indicating if transfer was offloaded
+ * @offloaded: bool indicating if transfer was offloaded
+ * @status: iscsi TCP status result
*
- * Will return status of transfer in status. And will return
+ * Will return status of transfer in @status. And will return
* number of bytes copied.
*/
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
@@ -955,9 +958,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
/**
* iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
* @task: scsi command task
- * @sc: scsi command
*/
int iscsi_tcp_task_init(struct iscsi_task *task)
{
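The libiscsi_tcp changes are kernel-doc repairs: each parameter the function actually takes gets a matching @name line, stale entries are dropped, and return semantics are spelled out. For reference, a minimal well-formed block for a hypothetical helper:

/**
 * demo_xmit_budget - send queued bytes for a connection (hypothetical)
 * @conn: iscsi connection
 * @budget: maximum number of bytes to transmit in this call
 *
 * Return: number of bytes actually sent, or a negative errno.
 */
static int demo_xmit_budget(struct iscsi_conn *conn, int budget);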
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 70be4425ae0b..2b3637b40dde 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -730,7 +730,6 @@ int sas_discover_sata(struct domain_device *dev)
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de66252fa2..e4fd078e4175 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -212,13 +212,9 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
}
}
-static void sas_probe_devices(struct work_struct *work)
+static void sas_probe_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_PROBE, &port->disc.pending);
/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
@@ -294,7 +290,6 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
@@ -353,13 +348,9 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
sas_put_device(dev);
}
-static void sas_destruct_devices(struct work_struct *work)
+void sas_destruct_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_DESTRUCT, &port->disc.pending);
list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
@@ -370,6 +361,16 @@ static void sas_destruct_devices(struct work_struct *work)
}
}
+static void sas_destruct_ports(struct asd_sas_port *port)
+{
+ struct sas_port *sas_port, *p;
+
+ list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
+ list_del_init(&sas_port->del_list);
+ sas_port_delete(sas_port);
+ }
+}
+
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -384,7 +385,6 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
- sas_discover_event(dev->port, DISCE_DESTRUCT);
}
}
@@ -490,6 +490,8 @@ static void sas_discover_domain(struct work_struct *work)
port->port_dev = NULL;
}
+ sas_probe_devices(port);
+
SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
task_pid_nr(current), error);
}
@@ -523,6 +525,10 @@ static void sas_revalidate_domain(struct work_struct *work)
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
+
+ sas_destruct_devices(port);
+ sas_destruct_ports(port);
+ sas_probe_devices(port);
}
/* ---------- Events ---------- */
@@ -534,7 +540,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
* workqueue, or known to be submitted from a context that is
* not racing against draining
*/
- scsi_queue_work(ha->core.shost, &sw->work);
+ queue_work(ha->disco_q, &sw->work);
}
static void sas_chain_event(int event, unsigned long *pending,
@@ -578,10 +584,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
- [DISCE_PROBE] = sas_probe_devices,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
- [DISCE_DESTRUCT] = sas_destruct_devices,
};
disc->pending = 0;
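sas_destruct_devices() and the new sas_destruct_ports() both walk their lists with list_for_each_entry_safe(), since the loop body unlinks and releases the current entry. A reduced sketch of why the _safe variant is required, on demo types:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head node;
};

static void demo_destruct_all(struct list_head *head)
{
	struct demo_node *n, *tmp;

	/*
	 * The plain iterator would read n->node.next after kfree(n);
	 * the _safe form caches the next entry in tmp first.
	 */
	list_for_each_entry_safe(n, tmp, head, node) {
		list_del_init(&n->node);
		kfree(n);
	}
}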
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 0bb9eefc08c8..ae923eb6de95 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -29,7 +29,8 @@
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- int rc = 0;
+ /* it's added to the defer_q when draining, so return success */
+ int rc = 1;
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return 0;
@@ -39,24 +40,20 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
} else
- rc = scsi_queue_work(ha->core.shost, &sw->work);
+ rc = queue_work(ha->event_q, &sw->work);
return rc;
}
-static int sas_queue_event(int event, unsigned long *pending,
- struct sas_work *work,
+static int sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
- int rc = 0;
+ unsigned long flags;
+ int rc;
- if (!test_and_set_bit(event, pending)) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->lock, flags);
- rc = sas_queue_work(ha, work);
- spin_unlock_irqrestore(&ha->lock, flags);
- }
+ spin_lock_irqsave(&ha->lock, flags);
+ rc = sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->lock, flags);
return rc;
}
@@ -64,21 +61,25 @@ static int sas_queue_event(int event, unsigned long *pending,
void __sas_drain_work(struct sas_ha_struct *ha)
{
- struct workqueue_struct *wq = ha->core.shost->work_q;
struct sas_work *sw, *_sw;
+ int ret;
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
spin_unlock_irq(&ha->lock);
- drain_workqueue(wq);
+ drain_workqueue(ha->event_q);
+ drain_workqueue(ha->disco_q);
spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
- sas_queue_work(ha, sw);
+ ret = sas_queue_work(ha, sw);
+ if (ret != 1)
+ sas_free_event(to_asd_sas_event(&sw->work));
+
}
spin_unlock_irq(&ha->lock);
}
@@ -115,33 +116,78 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
+ struct asd_sas_phy *sas_phy;
if (!test_and_clear_bit(ev, &d->pending))
continue;
- sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+ if (list_empty(&port->phy_list))
+ continue;
+
+ sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+ port_phy_el);
+ ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
}
mutex_unlock(&ha->disco_mutex);
}
+
+static void sas_port_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_port_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
+static void sas_phy_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_phy_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
- return sas_queue_event(event, &phy->port_events_pending,
- &phy->port_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
- return sas_queue_event(event, &phy->phy_events_pending,
- &phy->phy_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_init_events(struct sas_ha_struct *sas_ha)
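
The sas_event.c hunks above move libsas from fixed per-phy work items guarded by pending bitmaps to per-notification event objects queued on the dedicated event_q; sas_queue_work() now returns 1 on success, and an event that cannot be requeued after a drain is freed instead of leaked. A minimal userspace sketch of that ownership model, assuming an invented inline "workqueue" and simplified names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct asd_sas_event {
	int event;
	void (*worker)(struct asd_sas_event *ev);
};

static atomic_int event_nr; /* models phy->event_nr */

static struct asd_sas_event *sas_alloc_event(void)
{
	struct asd_sas_event *ev = calloc(1, sizeof(*ev));

	if (ev)
		atomic_fetch_add(&event_nr, 1);
	return ev;
}

static void sas_free_event(struct asd_sas_event *ev)
{
	atomic_fetch_sub(&event_nr, 1);
	free(ev);
}

/* Returns 1 on success, mirroring the patched sas_queue_work(); here the
 * worker simply runs inline instead of on a real workqueue. */
static int sas_queue_event(struct asd_sas_event *ev)
{
	ev->worker(ev);
	return 1;
}

static void phy_worker(struct asd_sas_event *ev)
{
	printf("handling phy event %d\n", ev->event);
	sas_free_event(ev); /* the worker owns and frees the event */
}

int main(void)
{
	struct asd_sas_event *ev = sas_alloc_event();

	if (!ev)
		return 1;
	ev->event = 0;
	ev->worker = phy_worker;
	if (sas_queue_event(ev) != 1)
		sas_free_event(ev); /* free only if queueing failed */
	printf("in-flight events: %d\n", atomic_load(&event_nr));
	return 0;
}
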
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3183d63de4da..6a4f8198b78e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
@@ -1914,7 +1916,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
+ list_add_tail(&phy->port->del_list,
+ &parent->port->sas_port_del_list);
phy->port = NULL;
}
}
@@ -2122,7 +2125,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
- while (res == 0 && dev) {
+ if (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
@@ -2134,9 +2137,6 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
-
- dev = NULL;
- res = sas_find_bcast_dev(port_dev, &dev);
}
return res;
}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 64fa6f53cb8b..c81a63b5dc71 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -39,6 +39,7 @@
#include "../scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
+static struct kmem_cache *sas_event_cache;
struct sas_task *sas_alloc_task(gfp_t flags)
{
@@ -109,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
+ char name[64];
int error = 0;
mutex_init(&sas_ha->disco_mutex);
@@ -122,6 +124,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->defer_q);
INIT_LIST_HEAD(&sas_ha->eh_dev_q);
+ sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;
+
error = sas_register_phys(sas_ha);
if (error) {
printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -140,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_ports;
}
+ error = -ENOMEM;
+ snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+ sas_ha->event_q = create_singlethread_workqueue(name);
+ if (!sas_ha->event_q)
+ goto Undo_ports;
+
+ snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
+ sas_ha->disco_q = create_singlethread_workqueue(name);
+ if (!sas_ha->disco_q)
+ goto Undo_event_q;
+
INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
+
+Undo_event_q:
+ destroy_workqueue(sas_ha->event_q);
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
@@ -174,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
+ destroy_workqueue(sas_ha->disco_q);
+ destroy_workqueue(sas_ha->event_q);
+
return 0;
}
@@ -364,8 +385,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
struct asd_sas_phy *phy = ha->sas_phy[i];
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->port_events_pending = 0;
- phy->phy_events_pending = 0;
phy->frame_rcvd_size = 0;
}
}
@@ -537,6 +556,37 @@ static struct sas_function_template sft = {
.smp_handler = sas_smp_handler,
};
+static inline ssize_t phy_event_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
+}
+
+static inline ssize_t phy_event_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ sha->event_thres = simple_strtol(buf, NULL, 10);
+
+ /* threshold cannot be set too small */
+ if (sha->event_thres < 32)
+ sha->event_thres = 32;
+
+ return count;
+}
+
+DEVICE_ATTR(phy_event_threshold,
+ S_IRUGO|S_IWUSR,
+ phy_event_threshold_show,
+ phy_event_threshold_store);
+EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
+
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
@@ -555,20 +605,71 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+{
+ struct asd_sas_event *event;
+ gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ event = kmem_cache_zalloc(sas_event_cache, flags);
+ if (!event)
+ return NULL;
+
+ atomic_inc(&phy->event_nr);
+
+ if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
+ if (i->dft->lldd_control_phy) {
+ if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+ sas_printk("The phy%02d bursting events, shut it down.\n",
+ phy->id);
+ sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+ }
+ } else {
+ /* PHY control is not supported; stop allocating events */
+ WARN_ONCE(1, "PHY control not supported.\n");
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+ event = NULL;
+ }
+ }
+
+ return event;
+}
+
+void sas_free_event(struct asd_sas_event *event)
+{
+ struct asd_sas_phy *phy = event->phy;
+
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+}
+
/* ---------- SAS Class register/unregister ---------- */
static int __init sas_class_init(void)
{
sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
- return -ENOMEM;
+ goto out;
+
+ sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
+ if (!sas_event_cache)
+ goto free_task_kmem;
return 0;
+free_task_kmem:
+ kmem_cache_destroy(sas_task_cache);
+out:
+ return -ENOMEM;
}
static void __exit sas_class_exit(void)
{
kmem_cache_destroy(sas_task_cache);
+ kmem_cache_destroy(sas_event_cache);
}
MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
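
Besides the new kmem cache, sas_alloc_event() above adds flood protection: an atomic per-phy counter tracks in-flight events, and once it crosses the sysfs-tunable event_thres a cmpxchg() on in_shutdown ensures PHYE_SHUTDOWN is raised only once. A hedged userspace model of that gate, with the threshold constant and names invented for the demo:

#include <stdatomic.h>
#include <stdio.h>

#define EVENT_THRES 32 /* models sha->event_thres */

static atomic_int event_nr;
static atomic_int in_shutdown;

static void alloc_event(void)
{
	atomic_fetch_add(&event_nr, 1);
	if (atomic_load(&event_nr) > EVENT_THRES) {
		int expected = 0;

		/* equivalent of cmpxchg(&phy->in_shutdown, 0, 1) == 0 */
		if (atomic_compare_exchange_strong(&in_shutdown, &expected, 1))
			printf("phy is bursting events, shutting it down\n");
	}
}

int main(void)
{
	/* only the 33rd allocation trips the gate, and only once */
	for (int i = 0; i < 40; i++)
		alloc_event();
	return 0;
}
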
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index c07e08136491..50e12d662ffe 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+void sas_free_event(struct asd_sas_event *event);
+
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
@@ -98,6 +101,10 @@ int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
+void sas_destruct_devices(struct asd_sas_port *port);
+
+extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
+extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index cdee446c29e1..bf3e1b979ca6 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -45,7 +45,7 @@ static void sas_phye_oob_done(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
}
@@ -58,8 +58,6 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
-
sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
@@ -88,8 +86,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
-
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
@@ -99,8 +95,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
-
/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
@@ -113,45 +107,41 @@ static void sas_phye_resume_timeout(struct work_struct *work)
}
+static void sas_phye_shutdown(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (phy->enabled) {
+ int ret;
+
+ phy->error = 0;
+ phy->enabled = 0;
+ ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
+ if (ret)
+ sas_printk("lldd disable phy%02d returned %d\n",
+ phy->id, ret);
+ } else
+ sas_printk("phy%02d is not enabled, cannot shutdown\n",
+ phy->id);
+}
+
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
- static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
- [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
- [PHYE_OOB_DONE] = sas_phye_oob_done,
- [PHYE_OOB_ERROR] = sas_phye_oob_error,
- [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
- [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
-
- };
-
- static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
- [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
- [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
- [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
- [PORTE_TIMER_EVENT] = sas_porte_timer_event,
- [PORTE_HARD_RESET] = sas_porte_hard_reset,
- };
-
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
- int k;
struct asd_sas_phy *phy = sas_ha->sas_phy[i];
phy->error = 0;
+ atomic_set(&phy->event_nr, 0);
INIT_LIST_HEAD(&phy->port_phy_el);
- for (k = 0; k < PORT_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
- phy->port_events[k].phy = phy;
- }
-
- for (k = 0; k < PHY_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
- phy->phy_events[k].phy = phy;
- }
phy->port = NULL;
phy->ha = sas_ha;
@@ -179,3 +169,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
return 0;
}
+
+const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+ [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+ [PHYE_OOB_DONE] = sas_phye_oob_done,
+ [PHYE_OOB_ERROR] = sas_phye_oob_error,
+ [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+ [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+ [PHYE_SHUTDOWN] = sas_phye_shutdown,
+};
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index d3c5297c6c89..f07e55d3aa73 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -66,6 +66,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
+ sas_destruct_devices(port);
continue;
}
@@ -192,6 +193,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+ flush_workqueue(sas_ha->disco_q);
}
/**
@@ -219,6 +221,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
+ sas_destruct_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else {
@@ -261,8 +264,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
-
sas_form_port(phy);
}
@@ -273,14 +274,15 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;
- clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
-
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
SAS_DPRINTK("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
+
+ if (phy->port)
+ flush_workqueue(phy->port->ha->disco_q);
}
void sas_porte_link_reset_err(struct work_struct *work)
@@ -288,8 +290,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -298,8 +298,6 @@ void sas_porte_timer_event(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -308,8 +306,6 @@ void sas_porte_hard_reset(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -323,6 +319,7 @@ static void sas_init_port(struct asd_sas_port *port,
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
+ INIT_LIST_HEAD(&port->sas_port_del_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
@@ -353,3 +350,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
sas_deform_port(sas_ha->sas_phy[i], 0);
}
+
+const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+ [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+ [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+ [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+ [PORTE_TIMER_EVENT] = sas_porte_timer_event,
+ [PORTE_HARD_RESET] = sas_porte_hard_reset,
+};
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index c9406852c3e9..6de9681ace82 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include "sas_internal.h"
@@ -959,21 +960,6 @@ void sas_target_destroy(struct scsi_target *starget)
sas_put_device(found_dev);
}
-static void sas_parse_addr(u8 *sas_addr, const char *p)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++) {
- u8 h, l;
- if (!*p)
- break;
- h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- sas_addr[i] = (h<<4) | l;
- }
-}
-
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
@@ -990,7 +976,9 @@ int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
goto out;
}
- sas_parse_addr(addr, fw->data);
+ res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
+ if (res)
+ goto out;
out:
release_firmware(fw);
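
The sas_request_addr() hunk above replaces the open-coded sas_parse_addr() with the kernel's hex2bin(), which converts 2*count hex digits into count bytes and, unlike the old parser, fails on non-hex input. A self-contained userspace model of that conversion (the helper is reimplemented here because hex2bin() is kernel-only; the WWN string is an invented example):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int hex_to_bin(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

/* Models the kernel's hex2bin(): 0 on success, -1 on a bad digit
 * (the kernel returns -EINVAL). */
static int hex2bin(unsigned char *dst, const char *src, size_t count)
{
	while (count--) {
		int hi = hex_to_bin(*src++);
		int lo = hex_to_bin(*src++);

		if (hi < 0 || lo < 0)
			return -1;
		*dst++ = (hi << 4) | lo;
	}
	return 0;
}

int main(void)
{
	unsigned char addr[8]; /* SAS_ADDR_SIZE is 8 */
	const char *fw_data = "500605B000A7F6C0"; /* example address */

	if (hex2bin(addr, fw_data, strnlen(fw_data, 16) / 2))
		return 1;
	for (size_t i = 0; i < sizeof(addr); i++)
		printf("%02x", addr[i]);
	printf("\n");
	return 0;
}
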
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 231302273257..61fb46da05d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -55,9 +55,10 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
+#define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -705,7 +706,6 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
-#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -945,6 +945,8 @@ struct lpfc_hba {
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
+ uint32_t get_nvme_bufs;
+ uint32_t put_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82f6e219ee34..d188fb565a32 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,6 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot;
@@ -198,10 +199,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,6 +242,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -271,6 +283,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
+ lport = (struct lpfc_nvme_lport *)localport->private;
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
@@ -347,9 +360,16 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %016x Cmpl %016x\n",
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests),
- atomic_read(&phba->fc4NvmeLsCmpls));
+ atomic_read(&phba->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
@@ -360,8 +380,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data1, data2, data3);
len += snprintf(buf+len, PAGE_SIZE-len,
- " Cmpl %016llx Outstanding %016llx\n",
- tot, (data1 + data2 + data3) - tot);
+ " noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " Cmpl %016llx Outstanding %016llx Abort %08x\n",
+ tot, ((data1 + data2 + data3) - tot),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
return len;
}
@@ -3366,12 +3400,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0 driver will calculate the optimal number of RQ pairs
* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
- 1, 1, 16,
+ LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
"Specify number of RQ pairs for processing NVMET cmds");
/*
@@ -5139,7 +5174,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
/*
@@ -6362,6 +6397,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}
+ if (!phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6407,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}
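
With LPFC_NVMET_MRQ_AUTO (0) as the new default, lpfc_nvme_mod_param_dep() above derives the MRQ pair count from cfg_nvme_io_channel and clamps it to the supported maximum. A small sketch of that resolution policy; the function name and the maximum used here are assumptions for illustration:

#include <stdio.h>

#define LPFC_NVMET_MRQ_AUTO 0
#define LPFC_NVMET_MRQ_MAX 16 /* assumed value for the demo */

static int resolve_nvmet_mrq(int cfg_nvmet_mrq, int nvme_io_channel)
{
	if (cfg_nvmet_mrq == LPFC_NVMET_MRQ_AUTO)
		cfg_nvmet_mrq = nvme_io_channel;
	/* avoid running out of WQE slots, then clamp to the maximum */
	if (cfg_nvmet_mrq > nvme_io_channel)
		cfg_nvmet_mrq = nvme_io_channel;
	if (cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
		cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
	return cfg_nvmet_mrq;
}

int main(void)
{
	printf("%d\n", resolve_nvmet_mrq(LPFC_NVMET_MRQ_AUTO, 8)); /* 8 */
	printf("%d\n", resolve_nvmet_mrq(32, 8));                  /* 8 */
	return 0;
}
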
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4e858b38529a..559f9aa0ed08 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,8 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
struct lpfc_nvmet_ctxbuf *ctxp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
+void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
+void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f77673ab4a84..9d20d2c208c7 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -685,6 +686,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_els_flush_rscn(vport);
goto out;
}
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_RSCN_DEFERRED) {
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * Skip processing the NS response
+ * Re-issue the NS cmd
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0151 Process Deferred RSCN Data: x%x x%x\n",
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ lpfc_els_handle_rscn(vport);
+
+ goto out;
+ }
+ spin_unlock_irq(shost->host_lock);
+
if (irsp->ulpStatus) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2bf5ad3b1512..17ea3bb04266 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -750,6 +750,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -775,10 +777,15 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}
len += snprintf(buf + len, size - len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, size - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
@@ -812,6 +819,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf + len, size - len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, size - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -885,8 +898,38 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
data1, data2, data3);
len += snprintf(buf + len, size - len,
- " Cmpl %016llx Outstanding %016llx\n",
+ " Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
+
+ localport = vport->localport;
+ if (!localport)
+ return len;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ if (!lport)
+ return len;
+
+ len += snprintf(buf + len, size - len,
+ "LS Xmt Err: Abrt %08x Err %08x "
+ "Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_abort),
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Xmt Err: noxri %06x nondlp %06x "
+ "qdepth %06x wqerr %06x Abrt %06x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+
}
return len;
@@ -3213,7 +3256,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
}
- if (eqidx < phba->cfg_nvmet_mrq) {
+ if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
@@ -3246,7 +3289,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx eqd %d]\n",
+ "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
@@ -3366,6 +3409,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
+ qp = phba->sli4_hba.hdr_rq;
+ len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
+ "ELS RQpair", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
+
/* Slow-path NVME LS response CQ */
qp = phba->sli4_hba.nvmels_cq;
len = __lpfc_idiag_print_cq(qp, "NVME LS",
@@ -3383,12 +3432,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
- qp = phba->sli4_hba.hdr_rq;
- len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
- "RQpair", pbuffer, len);
- if (len >= max_cnt)
- goto too_big;
-
goto out;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f9a566eaef04..5a7547f9d8d8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -134,6 +134,8 @@ struct lpfc_nodelist {
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
uint32_t upcall_flags;
+#define NLP_WAIT_FOR_UNREG 0x1
+
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 39d5b146202e..234c7c015982 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -858,6 +858,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
@@ -916,28 +919,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ } else {
/* This side will wait for the PLOGI, decrement ndlp reference
* count indicating that ndlp can be released when other
* references to it are done.
*/
lpfc_nlp_put(ndlp);
- /* If we are pt2pt with another NPort, force NPIV off! */
- phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
+ /* Start discovery - this should just do CLEAR_LA */
+ lpfc_disc_start(vport);
}
return 0;
@@ -1030,30 +1034,31 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
- "Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE)))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2858 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x Data x%x x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, phba->hba_flag,
+ phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- /* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
-
-
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1670,6 +1675,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
+ lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -2088,6 +2094,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
+
+ /* Driver supports multiple FC4 types. Counters matter. */
+ vport->fc_prli_sent--;
+ ndlp->fc4_prli_sent--;
spin_unlock_irq(shost->host_lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2095,9 +2105,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
- /* Ddriver supports multiple FC4 types. Counters matter. */
- vport->fc_prli_sent--;
-
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
@@ -2111,7 +2118,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/* Check for retry */
- ndlp->fc4_prli_sent--;
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
goto out;
@@ -2190,6 +2196,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
+ /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
+ * fields here before any of them can complete.
+ */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ ndlp->nvme_fb_size = 0;
+
send_next_prli:
if (local_nlp_type & NLP_FC4_FCP) {
/* Payload is 4 + 16 = 20 x14 bytes. */
@@ -2298,6 +2313,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
+
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
+ vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
@@ -2308,12 +2330,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
- /* The vport counters are used for lpfc_scan_finished, but
- * the ndlp is used to track outstanding PRLIs for different
- * FC4 types.
- */
- vport->fc_prli_sent++;
- ndlp->fc4_prli_sent++;
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2951,8 +2967,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
-
- lpfc_nlp_put(ndlp);
+ if (!(vport->fc_flag & FC_PT2PT))
+ lpfc_nlp_put(ndlp);
return 0;
}
@@ -6172,9 +6188,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6849,7 +6862,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
/* use the command's xri in the response */
@@ -8060,13 +8073,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
-
- /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
- if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- rjt_exp = LSEXP_REQ_UNSUPPORTED;
- break;
- }
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
@@ -8149,9 +8155,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_nlp_put(ndlp);
break;
case ELS_CMD_REC:
- /* receive this due to exchange closed */
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_exp = LSEXP_INVALID_OX_RX;
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2bafde2b7cfe..b159a5c4e388 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
- if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
- lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -4178,12 +4176,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0)
- /* Start devloss */
- lpfc_nvme_unregister_port(vport, ndlp);
- else
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
+ }
}
}
@@ -4207,11 +4207,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
- * Initiators take the NDLP ref count in
- * the register.
+ * Only NVME Target Rports are registered with
+ * the transport.
*/
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
} else {
/* Just take an NDLP ref count since the
* target does not register rports.
@@ -5838,9 +5840,12 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3185 FIND node filter %p DID "
- "Data: x%p x%x x%x\n",
+ "ndlp %p did x%x flg x%x st x%x "
+ "xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag);
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_xri, ndlp->nlp_type,
+ ndlp->nlp_rpi);
return ndlp;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2b145966c73f..73c2f6971d2b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1122,6 +1122,7 @@ struct cq_context {
#define LPFC_CQ_CNT_256 0x0
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
+#define LPFC_CQ_CNT_WORD7 0x3
uint32_t word1;
#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF
@@ -1129,7 +1130,7 @@ struct cq_context {
#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
#define lpfc_cq_eq_id_2_WORD word1
- uint32_t reserved0;
+ uint32_t lpfc_cq_context_count; /* Version 2 Only */
uint32_t reserved1;
};
@@ -1193,6 +1194,9 @@ struct lpfc_mbx_cq_create_set {
#define lpfc_mbx_cq_create_set_arm_SHIFT 31
#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
#define lpfc_mbx_cq_create_set_arm_WORD word2
+#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16
+#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF
+#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2
#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_cq_WORD word2
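
The new lpfc_mbx_cq_create_set_cq_cnt field follows lpfc's usual SHIFT/MASK/WORD triple convention, where generic bf_set()/bf_get() helpers mask and shift a value in place within the named mailbox word. A hedged standalone model of that accessor pattern (names simplified; not the driver's actual macros):

#include <stdint.h>
#include <stdio.h>

#define cq_cnt_SHIFT 16
#define cq_cnt_MASK  0x00007FFF

/* models bf_set(lpfc_mbx_cq_create_set_cq_cnt, ..., val) */
static void bf_set(uint32_t *word, uint32_t val)
{
	*word = (*word & ~(cq_cnt_MASK << cq_cnt_SHIFT)) |
		((val & cq_cnt_MASK) << cq_cnt_SHIFT);
}

static uint32_t bf_get(uint32_t word)
{
	return (word >> cq_cnt_SHIFT) & cq_cnt_MASK;
}

int main(void)
{
	uint32_t word2 = 0;

	bf_set(&word2, 4096);
	printf("cq_cnt=%u word2=0x%08x\n", bf_get(word2), word2);
	return 0;
}
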
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b7ea7e53e12..f539c554588c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ int cnt;
lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ cnt = 0;
list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
+ cnt++;
}
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ phba->put_nvme_bufs += cnt;
list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
@@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
+ phba->put_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
uint16_t i, lxri, els_xri_cnt;
uint16_t nvme_xri_cnt, nvme_xri_max;
LIST_HEAD(nvme_sgl_list);
- int rc;
+ int rc, cnt;
phba->total_nvme_bufs = 0;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return 0;
@@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock_irq(&phba->nvme_buf_list_get_lock);
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = cnt;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize the NVME buffer list used by driver for NVME IO */
spin_lock_init(&phba->nvme_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = 0;
spin_lock_init(&phba->nvme_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
}
/* Initialize the fabric iocb list */
@@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;
phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5875,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
/*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
@@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
@@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->nvmet_support) {
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ }
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,10 +7982,10 @@ static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- int cnt;
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
}
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
- cnt = LPFC_NVME_WQSIZE;
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7987,11 +8011,18 @@ static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the CQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
/* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the WQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP WQ (%d)\n",
@@ -8173,7 +8210,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
@@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path Mailbox Command Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = qdesc;
/* Create slow-path ELS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create Mailbox Command Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path ELS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Create NVME LS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.nvmels_cq = qdesc;
/* Create NVME LS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create Receive Queue for header */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq = qdesc;
/* Create Receive Queue for data */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
- phba->cfg_nvmet_mrq);
+ if (phba->nvmet_support) {
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
- phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
- phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+ }
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
qidx, (uint32_t)rc);
return rc;
}
+ cq->chann = qidx;
if (qtype != LPFC_MBOX) {
/* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
/* no need to tear down cq - caller will do so */
return rc;
}
+ wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
@@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
+ phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
@@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
- /* Unset NVMET MRQ queue */
- if (phba->sli4_hba.nvmet_mrq_hdr) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_rq_destroy(phba,
+ if (phba->nvmet_support) {
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(
+ phba,
phba->sli4_hba.nvmet_mrq_hdr[qidx],
phba->sli4_hba.nvmet_mrq_data[qidx]);
- }
+ }
- /* Unset NVMET CQ Set complete queue */
- if (phba->sli4_hba.nvmet_cqset) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_cq_destroy(phba,
- phba->sli4_hba.nvmet_cqset[qidx]);
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(
+ phba, phba->sli4_hba.nvmet_cqset[qidx]);
+ }
}
/* Unset FCP response complete queue */
@@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- /* Pending NVME XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- &cqelist);
- }
/* Pending async events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
@@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
lpfc_sli4_bar0_register_memmap(phba, if_type);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
- /*
- * Map SLI4 if type 0 HBA Control Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->sli4_hba.ctrl_regs_memmap_p =
- ioremap(phba->pci_bar1_map, bar1map_len);
- if (!phba->sli4_hba.ctrl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA control registers.\n");
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+ /*
+ * Map SLI4 if type 0 HBA Control Register base to a
+ * kernel virtual address and setup the registers.
+ */
+ phba->pci_bar1_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map,
+ bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA "
+ "control registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_conf;
+ }
+ phba->pci_bar2_memmap_p =
+ phba->sli4_hba.ctrl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba);
+ } else {
+ error = -ENOMEM;
goto out_iounmap_conf;
}
- phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
- lpfc_sli4_bar1_register_memmap(phba);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
- /*
- * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->sli4_hba.drbl_regs_memmap_p =
- ioremap(phba->pci_bar2_map, bar2map_len);
- if (!phba->sli4_hba.drbl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA doorbell registers.\n");
- goto out_iounmap_ctrl;
- }
- phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
- error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
- if (error)
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+ /*
+ * Map SLI4 if type 0 HBA Doorbell Register base to
+ * a kernel virtual address and setup the registers.
+ */
+ phba->pci_bar2_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map,
+ bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA"
+ " doorbell registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_ctrl;
+ }
+ phba->pci_bar4_memmap_p =
+ phba->sli4_hba.drbl_regs_memmap_p;
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+ } else {
+ error = -ENOMEM;
goto out_iounmap_all;
+ }
}
return 0;
@@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ /* Driver just aborted IOs during the hba_unset process. Pause
+ * here to give the HBA time to complete the IO and get entries
+ * into the abts lists.
+ */
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+ /* Wait for NVME pending IO to flush back to transport. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_nvme_wait_for_io_drain(phba);
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
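The additions above set up a settle-then-poll pattern: sleep once up front so aborted IOs can land on the abts lists, then let the rest of this function (not shown in the hunk) poll those lists with short sleeps. A minimal userspace sketch of that pattern, assuming illustrative names (t1_ms, lists_empty) rather than lpfc symbols:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool lists_empty(int iter)      /* stand-in for the abts-list checks */
{
	return iter >= 7;
}

int main(void)
{
	const int t1_ms = 10;  /* plays the role of LPFC_XRI_EXCH_BUSY_WAIT_T1 */
	int iter = 0, waited_ms = 0;

	usleep(5 * t1_ms * 1000);      /* initial grace period, as in the patch */
	while (!lists_empty(iter++)) {
		usleep(t1_ms * 1000);  /* short poll interval */
		waited_ms += t1_ms;
		if (waited_ms % 1000 == 0)   /* periodic progress warning */
			printf("still waiting after %d ms\n", waited_ms);
	}
	return 0;
}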
@@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
+
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
}
/**
@@ -12138,10 +12227,10 @@ int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc)
goto out_error;
@@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
if (phba->cfg_fof) {
/* Create OAS CQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc)
goto out_error;
@@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
-
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc)
goto out_error;
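The branch above chooses queue geometry from phba->fcp_embed_io: an expanded page with 128-byte WQEs for embedded IO, otherwise the default page with the stock entry sizes. The selection in one place, as a sketch; the numbers are stand-ins for the LPFC_* constants (4 KiB default page, 16 KiB expanded page, illustrative entry counts):

#include <stdio.h>

struct qparams { unsigned page_size, entry_size, entry_count; };

static struct qparams wq_params(int fcp_embed_io)
{
	if (fcp_embed_io)	/* expanded page, 128-byte WQEs */
		return (struct qparams){ 16384, 128, 256 };
	return (struct qparams){ 4096, 64, 1024 };	/* defaults */
}

int main(void)
{
	struct qparams p = wq_params(1);
	printf("page %u, wqe %u, count %u\n",
	       p.page_size, p.entry_size, p.entry_count);
	return 0;
}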
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b6957d944b9a..d841aa42f607 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
break;
}
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -727,6 +732,41 @@ out:
return 0;
}
+static uint32_t
+lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct ls_rjt stat;
+ uint32_t *payload;
+ uint32_t cmd;
+
+ payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ cmd = *payload;
+ if (vport->phba->nvmet_support) {
+ /* Must be an NVME PRLI */
+ if (cmd == ELS_CMD_PRLI)
+ goto out;
+ } else {
+ /* Initiator mode. */
+ if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
+ goto out;
+ }
+ return 1;
+out:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+ "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
+ "state x%x flags x%x\n",
+ cmd, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
+ return 0;
+}
+
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
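lpfc_rcv_prli_support_check above boils down to a small decision table: a target-mode port honors only NVMe PRLIs, and an initiator without NVMe support rejects NVMe PRLIs; everything else is accepted. The table restated as standalone C, with placeholder opcode values rather than the real ELS_CMD_* encodings:

#include <stdbool.h>
#include <stdio.h>

enum { CMD_PRLI = 1, CMD_NVMEPRLI = 2 };   /* placeholders, not ELS codes */

static bool prli_supported(bool nvmet_support, bool nvmei_support, int cmd)
{
	if (nvmet_support)                 /* target mode: NVMe PRLI only */
		return cmd != CMD_PRLI;
	if (!nvmei_support && cmd == CMD_NVMEPRLI)
		return false;              /* initiator lacks NVMe support */
	return true;
}

int main(void)
{
	printf("target, FCP PRLI   -> %d\n", prli_supported(true, false, CMD_PRLI));
	printf("init no-NVMe, NVMe -> %d\n", prli_supported(false, false, CMD_NVMEPRLI));
	printf("init NVMe, NVMe    -> %d\n", prli_supported(false, true, CMD_NVMEPRLI));
	return 0;
}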
@@ -742,9 +782,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lp = (uint32_t *) pcmd->virt;
npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
if (npr->initiatorFunc) {
@@ -769,8 +806,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* type. Target mode does not issue gft_id so doesn't get
* the fc4 type set until now.
*/
- if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+ if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+ if (npr->prliType == PRLI_FCP_TYPE)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
}
if (rport) {
/* We need to update the rport role values */
@@ -1373,7 +1414,8 @@ lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1544,6 +1586,9 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
+ return ndlp->nlp_state;
+ }
if (vport->phba->nvmet_support) {
/* NVME Target mode. Handle and respond to the PRLI and
* transition to UNMAPPED provided the RPI has completed
@@ -1552,28 +1597,22 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else {
/* RPI registration has not completed. Reject the PRLI
* to prevent an illegal state transition when the
* rpi registration does complete.
*/
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
- "6115 NVMET ndlp rpi %d state "
- "unknown, state x%x flags x%08x\n",
- ndlp->nlp_rpi, ndlp->nlp_state,
- ndlp->nlp_flag);
memset(&stat, 0, sizeof(struct ls_rjt));
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, NULL);
+ return ndlp->nlp_state;
}
} else {
/* Initiator mode. */
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
}
-
return ndlp->nlp_state;
}
@@ -1819,6 +1858,8 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1922,13 +1963,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
}
- /* Check out PRLI rsp */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-
- /* NVME or FCP first burst must be negotiated for each PRLI. */
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
- ndlp->nvme_fb_size = 0;
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -1945,8 +1979,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
@@ -1991,8 +2023,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
be32_to_cpu(nvpr->word5),
ndlp->nlp_flag, ndlp->nlp_fcp_info,
ndlp->nlp_type);
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
@@ -2016,7 +2046,8 @@ out_err:
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
- else
+ else if (ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else
lpfc_printf_vlog(vport,
@@ -2241,6 +2272,9 @@ lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
+
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
@@ -2310,6 +2344,8 @@ lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 517ae570e507..81e3a4f10c3c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -57,11 +57,13 @@
/* NVME initiator-based functions */
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite);
static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+static struct nvme_fc_port_template lpfc_nvme_template;
/**
* lpfc_nvme_create_queue -
@@ -88,6 +90,9 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *qhandle;
char *str;
+ if (!pnvme_lport->private)
+ return -ENOMEM;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
@@ -140,6 +145,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
+ if (!pnvme_lport->private)
+ return;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
@@ -154,6 +162,10 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
struct lpfc_nvme_lport *lport = localport->private;
+ lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
+ "6173 localport %p delete complete\n",
+ lport);
+
/* release any threads waiting for the unreg to complete */
complete(&lport->lport_unreg_done);
}
@@ -189,16 +201,19 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete complete %p\n",
+ "6146 remoteport delete of remoteport %p\n",
remoteport);
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
+
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
lpfc_nlp_put(ndlp);
rport_err:
- /* This call has to execute as long as the rport is valid.
- * Release any threads waiting for the unreg to complete.
- */
- complete(&rport->rport_unreg_done);
+ return;
}
static void
@@ -206,6 +221,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
+ struct lpfc_nvme_lport *lport;
uint32_t status;
struct nvmefc_ls_req *pnvme_lsreq;
struct lpfc_dmabuf *buf_ptr;
@@ -215,6 +231,13 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
@@ -419,6 +442,9 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (vport->load_flag & FC_UNLOADING)
return -ENODEV;
+ if (vport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -490,6 +516,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
ndlp, 2, 30, 0);
if (ret != WQE_SUCCESS) {
+ atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6052 EXIT. issue ls wqe failed lport %p, "
"rport %p lsreq%p Status %x DID %x\n",
@@ -534,6 +561,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -571,6 +601,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+ atomic_inc(&lport->xmt_ls_abort);
spin_lock_irq(&phba->hbalock);
list_del_init(&wqe->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, wqe);
@@ -774,8 +805,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct lpfc_nvme_lport *lport;
unsigned long flags;
- uint32_t code;
+ uint32_t code, status;
uint16_t cid, sqhd, data;
uint32_t *ptr;
@@ -790,10 +822,17 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_fcp_xb);
+ atomic_inc(&lport->cmpl_fcp_err);
+ }
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
- bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+ status, wcqe->parameter);
/*
* Catch race where our node has transitioned, but the
* transport is still transitioning.
@@ -851,8 +890,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
nCmd->transferred_length = nCmd->payload_length;
} else {
- lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
- LPFC_IOCB_STATUS_MASK);
+ lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
@@ -946,10 +984,13 @@ out_err:
freqpriv->nvme_buf = NULL;
/* NVME targets need completion held off until the abort exchange
- * completes.
+ * completes unless the NVME Rport is getting unregistered.
*/
- if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
nCmd->done(nCmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
@@ -1149,7 +1190,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
+ if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
@@ -1239,6 +1280,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct nvmefc_fcp_req *pnvme_fcreq)
{
int ret = 0;
+ int expedite = 0;
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -1246,13 +1288,30 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
#endif
+ /* Validate pointers. LLDD fault handling with transport does
+ * have timing races.
+ */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6129 Fail Abort, NULL hw_queue_handle\n");
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
phba = vport->phba;
if (vport->load_flag & FC_UNLOADING) {
@@ -1260,16 +1319,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- /* Validate pointers. */
- if (!pnvme_lport || !pnvme_rport || !freqpriv) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
- "6117 No Send:IO submit ptrs NULL, lport %p, "
- "rport %p fcreq_priv %p\n",
- pnvme_lport, pnvme_rport, freqpriv);
+ if (vport->load_flag & FC_UNLOADING) {
ret = -ENODEV;
goto out_fail;
}
+ freqpriv = pnvme_fcreq->private;
+ if (unlikely(!freqpriv)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
@@ -1293,6 +1353,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6066 Missing node for DID %x\n",
pnvme_rport->port_id);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
@@ -1306,21 +1367,36 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
+ /* Currently only NVME Keep alive commands should be expedited
+ * if the driver runs out of a resource. These should only be
+ * issued on the admin queue, qidx 0
+ */
+ if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
+ sqe = &((struct nvme_fc_cmd_iu *)
+ pnvme_fcreq->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_keep_alive)
+ expedite = 1;
+ }
+
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
}
- lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
+ lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
if (lpfc_ncmd == NULL) {
+ atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6065 driver's buffer pool is empty, "
"IO failed\n");
@@ -1373,6 +1449,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
+ atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
@@ -1473,19 +1550,36 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nvme_rport *rport;
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
union lpfc_wqe *abts_wqe;
unsigned long flags;
int ret_val;
+ /* Validate pointers. LLDD fault handling with transport does
+ * have timing races.
+ */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport))
+ return;
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6129 Fail Abort, HW Queue Handle NULL.\n");
+ return;
+ }
+
phba = vport->phba;
+ freqpriv = pnvme_fcreq->private;
+
+ if (unlikely(!freqpriv))
+ return;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
@@ -1552,6 +1646,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ atomic_inc(&lport->xmt_fcp_abort);
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
nvmereq_wqe->sli4_xritag,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
@@ -1931,6 +2026,8 @@ lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -2067,6 +2164,20 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
return num_posted;
}
+static inline struct lpfc_nvme_buf *
+lpfc_nvme_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ list_del_init(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
+ return lpfc_ncmd;
+ }
+ return NULL;
+}
+
/**
* lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
@@ -2079,35 +2190,27 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* Pointer to lpfc_nvme_buf - Success
**/
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite)
{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ struct lpfc_nvme_buf *lpfc_ncmd = NULL;
unsigned long iflag = 0;
- int found = 0;
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
- if (!found) {
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
+ if (!lpfc_ncmd) {
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice(&phba->lpfc_nvme_buf_list_put,
&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs += phba->put_nvme_bufs;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
- if (!found)
- return NULL;
return lpfc_ncmd;
}
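The rework above replaces the open-coded list walk with a counted get/put scheme: frees accumulate on a put list, the get side splices them over only when it runs dry, and the last LPFC_NVME_EXPEDITE_XRICNT buffers are held back for expedited (keep-alive) requests. The same scheme in standalone C, minus locking (single-threaded for brevity; the driver holds the two list locks):

#include <stdio.h>
#include <stdlib.h>

#define RESERVE 8                 /* role of LPFC_NVME_EXPEDITE_XRICNT */

struct buf { struct buf *next; };
static struct buf *get_list, *put_list;
static int get_cnt, put_cnt;

static struct buf *take(void)
{
	struct buf *b = get_list;
	if (b) {
		get_list = b->next;
		get_cnt--;
	}
	return b;
}

static struct buf *pool_get(int expedite)
{
	struct buf *b = NULL;

	if (get_cnt > RESERVE || expedite)
		b = take();
	if (!b) {                     /* splice put list over, retry */
		while (put_list) {
			struct buf *t = put_list;
			put_list = t->next;
			t->next = get_list;
			get_list = t;
		}
		get_cnt += put_cnt;
		put_cnt = 0;
		if (get_cnt > RESERVE || expedite)
			b = take();
	}
	return b;
}

static void pool_put(struct buf *b)
{
	b->next = put_list;
	put_list = b;
	put_cnt++;
}

int main(void)
{
	for (int i = 0; i < 10; i++)  /* seed ten buffers */
		pool_put(calloc(1, sizeof(struct buf)));
	while (pool_get(0))           /* normal gets stop at the reserve */
		;
	printf("held in reserve: %d\n", get_cnt);
	printf("expedited get: %s\n", pool_get(1) ? "ok" : "empty");
	return 0;
}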
@@ -2145,6 +2248,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs++;
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
}
}
@@ -2221,6 +2325,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
vport->nvmei_support = 1;
+ atomic_set(&lport->xmt_fcp_noxri, 0);
+ atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
+ atomic_set(&lport->xmt_fcp_qdepth, 0);
+ atomic_set(&lport->xmt_fcp_wqerr, 0);
+ atomic_set(&lport->xmt_fcp_abort, 0);
+ atomic_set(&lport->xmt_ls_abort, 0);
+ atomic_set(&lport->xmt_ls_err, 0);
+ atomic_set(&lport->cmpl_fcp_xb, 0);
+ atomic_set(&lport->cmpl_fcp_err, 0);
+ atomic_set(&lport->cmpl_ls_xb, 0);
+ atomic_set(&lport->cmpl_ls_err, 0);
+
/* Don't post more new bufs if repost already recovered
* the nvme sgls.
*/
@@ -2234,6 +2350,47 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
return ret;
}
+/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
+ *
+ * The driver has to wait for the host nvme transport to callback
+ * indicating the localport has successfully unregistered all
+ * resources. Since this is an uninterruptible wait, loop every ten
+ * seconds and print a message indicating no progress.
+ *
+ * An uninterruptible wait is used because of the risk of transport-to-
+ * driver state mismatch.
+ */
+void
+lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+ struct lpfc_nvme_lport *lport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u32 wait_tmo;
+ int ret;
+
+ * Host transport has to clean up and confirm, requiring an indefinite
+ * wait. Print a message if a 10-second wait expires and renew the
+ * wait. This is unexpected.
+ */
+ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ while (true) {
+ ret = wait_for_completion_timeout(&lport->lport_unreg_done,
+ wait_tmo);
+ if (unlikely(!ret)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6176 Lport %p Localport %p wait "
+ "timed out. Renewing.\n",
+ lport, vport->localport);
+ continue;
+ }
+ break;
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6177 Lport %p Localport %p Complete Success\n",
+ lport, vport->localport);
+#endif
+}
+
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
* @pnvme: pointer to lpfc nvme data structure.
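lpfc_nvme_lport_unreg_wait replaces the old fixed wait_for_completion_timeout(..., 5) with a wait that never gives up, only logs each expiry and renews. The renew-on-timeout shape in a userspace sketch, with a simulated completion time standing in for the transport callback:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	const int tmo_s = 10;          /* analogous to LPFC_NVME_WAIT_TMO */
	time_t start = time(NULL), done_after_s = 25;

	for (;;) {
		sleep(tmo_s);          /* wait_for_completion_timeout stand-in */
		if (time(NULL) - start >= done_after_s)
			break;         /* "completion" fired */
		printf("unreg wait timed out; renewing\n");
	}
	printf("unreg complete\n");
	return 0;
}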
@@ -2268,7 +2425,11 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
*/
init_completion(&lport->lport_unreg_done);
ret = nvme_fc_unregister_localport(localport);
- wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+ /* Wait for completion. This either blocks
+ * indefinitely or succeeds
+ */
+ lpfc_nvme_lport_unreg_wait(vport, lport);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@@ -2365,6 +2526,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ if (!ndlp->nrport)
+ lpfc_nlp_get(ndlp);
+
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
/* If the ndlp already has an nrport, this is just
@@ -2373,23 +2537,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
rport = remote_port->private;
if (ndlp->nrport) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "rport wwpn 0x%llx, "
- "Data: x%x x%x x%x x%06x\n",
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- ndlp->nlp_type,
- ndlp->nlp_DID);
+ if (ndlp->nrport == remote_port->private) {
+ /* Same remoteport. Just reuse. */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "remoteport %p wwpn 0x%llx, "
+ "Data: x%x x%x %p x%x x%06x\n",
+ remote_port,
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
+ ndlp,
+ ndlp->nlp_type,
+ ndlp->nlp_DID);
+ return 0;
+ }
prev_ndlp = rport->ndlp;
- /* Sever the ndlp<->rport connection before dropping
- * the ndlp ref from register.
+ /* Sever the ndlp<->rport association
+ * before dropping the ndlp ref from
+ * register.
*/
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
rport->ndlp = NULL;
+ rport->remoteport = NULL;
if (prev_ndlp)
lpfc_nlp_put(ndlp);
}
@@ -2397,19 +2571,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Clean bind the rport to the ndlp. */
rport->remoteport = remote_port;
rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
+ rport->ndlp = ndlp;
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = rport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
+ "lport %p Remoteport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
+ "x%06x Role x%x, ndlp %p\n",
+ lport, remote_port,
rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ rpinfo.port_id, rpinfo.port_role,
+ ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2473,20 +2648,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
*/
- if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
- init_completion(&rport->rport_unreg_done);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
+ ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
+ lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
}
-
}
return;
@@ -2545,8 +2720,11 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
@@ -2558,3 +2736,45 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
"6312 XRI Aborted xri x%x not found\n", xri);
}
+
+/**
+ * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all wqes in the nvme rings and frees all resources
+ * in the txcmplq. This function does not issue abort wqes for the IO
+ * commands in txcmplq, they will just be returned with
+ * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ u32 i, wait_cnt = 0;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return;
+
+ /* Cycle through all NVME rings and make sure all outstanding
+ * WQEs have been removed from the txcmplqs.
+ */
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+
+ /* Retrieve everything on the txcmplq */
+ while (!list_empty(&pring->txcmplq)) {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_cnt++;
+
+ /* The sleep is 10 ms. Every ten seconds,
+ * dump a message. Something is wrong.
+ */
+ if ((wait_cnt % 1000) == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6178 NVME IO not empty, "
+ "cnt %d\n", wait_cnt);
+ }
+ }
+ }
+}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index d192bb268f99..e79f8f75758c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -22,10 +22,12 @@
********************************************************************/
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVME_WQSIZE 256
#define LPFC_NVME_ERSP_LEN 0x20
+#define LPFC_NVME_WAIT_TMO 10
+#define LPFC_NVME_EXPEDITE_XRICNT 8
+
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
@@ -36,7 +38,18 @@ struct lpfc_nvme_qhandle {
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
- /* Add sttats counters here */
+ /* Add stats counters here */
+ atomic_t xmt_fcp_noxri;
+ atomic_t xmt_fcp_bad_ndlp;
+ atomic_t xmt_fcp_qdepth;
+ atomic_t xmt_fcp_wqerr;
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_ls_abort;
+ atomic_t xmt_ls_err;
+ atomic_t cmpl_fcp_xb;
+ atomic_t cmpl_fcp_err;
+ atomic_t cmpl_ls_xb;
+ atomic_t cmpl_ls_err;
};
struct lpfc_nvme_rport {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 84cf1b9079f7..8dbf5c9d51aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -38,6 +38,7 @@
#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
@@ -126,10 +127,17 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (status)
- atomic_inc(&tgtp->xmt_ls_rsp_error);
- else
- atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ if (tgtp) {
+ if (status) {
+ atomic_inc(&tgtp->xmt_ls_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_ls_rsp_aborted);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
+ } else {
+ atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ }
+ }
out:
rsp = &ctxp->ctx.ls_req;
@@ -218,6 +226,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -253,6 +262,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d "
+ "from %06x\n",
+ oxid, size, sid);
+ /* defer repost rcv buffer till .defer_rcv callback */
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -519,8 +539,11 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (status) {
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
rsp->transferred_length = 0;
- if (tgtp)
+ if (tgtp) {
atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
+ }
logerr = LOG_NVME_IOERR;
@@ -528,6 +551,8 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;
logerr |= LOG_NVME_ABTS;
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -635,6 +660,9 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return -ENODEV;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return -ENODEV;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -721,6 +749,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ rc = -ENODEV;
+ goto aerr;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -823,6 +856,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
+ if (phba->pport->load_flag & FC_UNLOADING)
+ return;
+
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -910,7 +946,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ else
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1216,6 +1256,8 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_ls_rsp, 0);
atomic_set(&tgtp->xmt_ls_drop, 0);
atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
@@ -1228,7 +1270,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_fcp_release, 0);
atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
atomic_set(&tgtp->xmt_fcp_abort, 0);
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
atomic_set(&tgtp->xmt_abort_unsol, 0);
@@ -1270,6 +1315,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
int rrq_empty = 0;
@@ -1280,6 +1326,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
+
+ if (phba->targetport) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
+ }
+
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1682,6 +1734,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1715,6 +1768,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
@@ -1726,10 +1780,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid);
/* defer reposting rcv buffer till .defer_rcv callback */
- ctxp->rqb_buffer = nvmebuf;
+ ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
+ ctxp->rqb_buffer = nvmebuf;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1992,7 +2047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 25a65b0bb7f3..5b32c9e4d4ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -25,6 +25,10 @@
#define LPFC_NVMET_RQE_DEF_COUNT 512
#define LPFC_NVMET_SUCCESS_LEN 12
+#define LPFC_NVMET_MRQ_OFF 0xffff
+#define LPFC_NVMET_MRQ_AUTO 0
+#define LPFC_NVMET_MRQ_MAX 16
+
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
@@ -43,6 +47,8 @@ struct lpfc_nvmet_tgtport {
/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_aborted;
+ atomic_t xmt_ls_rsp_xb_set;
atomic_t xmt_ls_rsp_cmpl;
/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
@@ -60,12 +66,15 @@ struct lpfc_nvmet_tgtport {
atomic_t xmt_fcp_rsp;
/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_xb_set;
atomic_t xmt_fcp_rsp_cmpl;
atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_aborted;
atomic_t xmt_fcp_rsp_drop;
/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+ atomic_t xmt_fcp_xri_abort_cqe;
atomic_t xmt_fcp_abort;
atomic_t xmt_fcp_abort_cmpl;
atomic_t xmt_abort_sol;
@@ -122,6 +131,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index aecd2399005d..5f5528a12308 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int hq_put_index;
+ int dq_put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
- temp_hrqe = hq->qe[put_index].rqe;
- temp_drqe = dq->qe[dq->host_index].rqe;
+ hq_put_index = hq->host_index;
+ dq_put_index = dq->host_index;
+ temp_hrqe = hq->qe[hq_put_index].rqe;
+ temp_drqe = dq->qe[dq_put_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
- if (put_index != dq->host_index)
+ if (hq_put_index != dq_put_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
- if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+ if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
- hq->host_index = ((put_index + 1) % hq->entry_count);
- dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+ hq->host_index = ((hq_put_index + 1) % hq->entry_count);
+ dq->host_index = ((dq_put_index + 1) % dq->entry_count);
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
}
writel(doorbell.word0, hq->db_regaddr);
}
- return put_index;
+ return hq_put_index;
}
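The renaming above makes the invariant explicit: the header and data receive queues advance in lockstep, so both put indices are captured up front and must match before either ring moves. The paired-put logic in miniature (entry copy and the doorbell write elided):

#include <stdio.h>

#define ENTRIES 8

struct ring { int host_index, hba_index; };

static int rq_pair_put(struct ring *hq, struct ring *dq)
{
	int hq_put = hq->host_index;
	int dq_put = dq->host_index;

	if (hq_put != dq_put)
		return -1;                          /* rings out of sync */
	if ((hq_put + 1) % ENTRIES == hq->hba_index)
		return -2;                          /* ring full */
	hq->host_index = (hq_put + 1) % ENTRIES;
	dq->host_index = (dq_put + 1) % ENTRIES;
	return hq_put;                              /* slot just filled */
}

int main(void)
{
	struct ring hq = { 0, 0 }, dq = { 0, 0 };
	for (int i = 0; i < 10; i++)
		printf("put -> %d\n", rq_pair_put(&hq, &dq));
	return 0;
}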
/**
@@ -12318,41 +12320,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 NVME abort XRI events.
- **/
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
-{
- struct lpfc_cq_event *cq_event;
-
- /* First, declare the fcp xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
- /* Now, handle all the fcp xri abort events */
- while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
- list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support) {
- lpfc_sli4_nvmet_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- } else {
- lpfc_sli4_nvme_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- }
- /* Free the event processed back to the free pool */
- lpfc_sli4_cq_event_release(phba, cq_event);
- }
-}
-
-/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return irspiocbq;
}
+inline struct lpfc_cq_event *
+lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to alloc CQ_EVENT entry\n");
+ return NULL;
+ }
+
+ /* Move the CQE into the event */
+ memcpy(&cq_event->cqe, entry, size);
+ return cq_event;
+}
+
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
@@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
"word2:x%x, word3:x%x\n", mcqe->word0,
mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0394 Failed to allocate CQ_EVENT entry\n");
+ cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
+ if (!cq_event)
return false;
- }
-
- /* Move the CQE into an asynchronous event entry */
- memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
/* Set the async event flag */
@@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
struct lpfc_cq_event *cq_event;
unsigned long iflags;
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0602 Failed to allocate CQ_EVENT entry\n");
- return false;
- }
-
- /* Move the CQE into the proper xri abort event list */
- memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
switch (cq->subtype) {
case LPFC_FCP:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
workposted = true;
break;
case LPFC_NVME:
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&cq_event->list,
- &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
- /* Set the nvme xri abort event flag */
- phba->hba_flag |= NVME_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ else
+ lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+
+ workposted = false;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
"%08x %08x %08x %08x\n",
cq->subtype, wcqe->word0, wcqe->parameter,
wcqe->word2, wcqe->word3);
- lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
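This reorder, repeated in the NVMET path in the next hunk, pulls lpfc_sli4_rq_release() inside the hbalock so the ring-index advance and the matching buffer dequeue are observed as one step. The shape of the fix in miniature, with a pthread mutex standing in for the host lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static int rq_index;                /* consumed-entry index */
static int buf_count = 8;           /* posted buffers backing the ring */

static int consume_one(void)
{
	int ok;

	pthread_mutex_lock(&host_lock);
	rq_index++;                 /* "rq_release": advance the ring */
	ok = buf_count > 0;         /* "rqbuf_get": claim the buffer */
	if (ok)
		buf_count--;
	pthread_mutex_unlock(&host_lock);  /* both steps appear atomic */
	return ok;
}

int main(void)
{
	printf("consumed: %d\n", consume_one());
	return 0;
}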
@@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
/**
* lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
* @phba: The HBA that this queue is being created on.
+ * @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
* @entry_count: The number of entries that this queue will handle.
*
@@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* queue on the HBA.
**/
struct lpfc_queue *
-lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
- uint32_t entry_count)
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+ uint32_t entry_size, uint32_t entry_count)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
@@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = page_size;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
+
+ /* Set queue parameters now. If the system cannot provide memory
+ * resources, the free routine needs to know what was allocated.
+ */
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+ queue->page_size = hw_page_size;
+ queue->phba = phba;
+
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
@@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->qe[total_qe_count].address = dma_pointer;
}
}
- queue->entry_size = entry_size;
- queue->entry_count = entry_count;
- queue->phba = phba;
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
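Together with the previous hunk, this moves the entry_size/entry_count/page_size/phba assignments ahead of the page-allocation loop, so that if a page allocation fails partway the free routine still knows what to unwind. The idiom in standalone form, with malloc standing in for dma_alloc_coherent:

#include <stdlib.h>

struct queue {
	size_t page_size;
	int page_count;
	void **pages;
};

static void queue_free(struct queue *q)
{
	for (int i = 0; q && q->pages && i < q->page_count; i++)
		free(q->pages[i]);        /* NULL entries are safe to free */
	free(q ? q->pages : NULL);
	free(q);
}

static struct queue *queue_alloc(size_t page_size, int page_count)
{
	struct queue *q = calloc(1, sizeof(*q));
	if (!q)
		return NULL;
	q->page_size = page_size;         /* set before any page allocation */
	q->page_count = page_count;
	q->pages = calloc(page_count, sizeof(void *));
	if (!q->pages)
		goto fail;
	for (int i = 0; i < page_count; i++) {
		q->pages[i] = calloc(1, page_size);
		if (!q->pages[i])
			goto fail;        /* queue_free knows what to undo */
	}
	return q;
fail:
	queue_free(q);
	return NULL;
}

int main(void)
{
	queue_free(queue_alloc(4096, 4));
	return 0;
}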
@@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!cq || !eq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.cqv);
if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
- /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
- bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (cq->page_size / SLI4_PAGE_SIZE));
bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
eq->queue_id);
} else {
@@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
eq->queue_id);
}
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ cq_create->u.request.context.lpfc_cq_context_count =
+ cq->entry_count;
+ bf_set(lpfc_cq_context_count,
+ &cq_create->u.request.context,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
- memset(dmabuf->virt, 0, hw_page_size);
+ memset(dmabuf->virt, 0, cq->page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
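With version-2 CQ create the page-size field is no longer hard-coded to 1; the mailbox encodes the queue page size in units of SLI4_PAGE_SIZE (4 KB). A small stand-alone check of the arithmetic, using the constants added later in this patch:

```c
#include <stdio.h>

#define SLI4_PAGE_SIZE          4096
#define LPFC_EXPANDED_PAGE_SIZE 16384

int main(void)
{
	/* Value written by bf_set(lpfc_mbx_cq_create_page_size, ...). */
	printf("encoded page size: %d\n",
	       LPFC_EXPANDED_PAGE_SIZE / SLI4_PAGE_SIZE);	/* prints 4 */
	return 0;
}
```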
@@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
numcq = phba->cfg_nvmet_mrq;
if (!cqp || !eqp || !numcq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -ENOMEM;
goto out;
}
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = cq->page_size;
switch (idx) {
case 0:
@@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
bf_set(lpfc_mbx_cq_create_set_num_cq,
&cq_set->u.request, numcq);
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ cq->entry_count);
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* fall through */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
+ cq->chann = idx;
rc = 0;
list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
void __iomem *bar_memmap_p;
uint32_t db_offset;
uint16_t pci_barset;
+ uint8_t wq_create_version;
/* sanity check on queue memory */
if (!wq || !cq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = wq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
+ wq_create_version = LPFC_Q_CREATE_VERSION_1;
+ else
+ wq_create_version = LPFC_Q_CREATE_VERSION_0;
+
+ switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_0:
switch (wq->entry_size) {
default:
@@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
bf_set(lpfc_mbx_wq_create_page_size,
&wq_create->u.request_1,
- LPFC_WQ_PAGE_SIZE_4096);
+ (wq->page_size / SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
break;
default:
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 13b8f4d4da34..81fb58e59e60 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -161,7 +161,6 @@ struct lpfc_queue {
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
- uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
@@ -169,6 +168,11 @@ struct lpfc_queue {
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
uint32_t q_mode;
+ uint16_t page_count; /* Number of pages allocated for this queue */
+ uint16_t page_size; /* size of page allocated for this queue */
+#define LPFC_EXPANDED_PAGE_SIZE 16384
+#define LPFC_DEFAULT_PAGE_SIZE 4096
+ uint16_t chann; /* IO channel this queue is associated with */
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -366,9 +370,9 @@ struct lpfc_bmbx {
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 1024
+#define LPFC_CQE_EXP_COUNT 4096
#define LPFC_WQE_DEF_COUNT 256
-#define LPFC_WQE128_DEF_COUNT 128
-#define LPFC_WQE128_MAX_COUNT 256
+#define LPFC_WQE_EXP_COUNT 1024
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
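The EXP counts pair with the LPFC_EXPANDED_PAGE_SIZE define above. Assuming the usual SLI4 entry sizes of a 16-byte CQE and a 128-byte WQE (an assumption; the entry sizes are chosen elsewhere in the driver), the expanded queues fit in whole 16 KB pages:

```c
#include <stdio.h>

#define LPFC_EXPANDED_PAGE_SIZE 16384
#define LPFC_CQE_EXP_COUNT      4096
#define LPFC_WQE_EXP_COUNT      1024

int main(void)
{
	int cq_bytes = LPFC_CQE_EXP_COUNT * 16;		/* assumed CQE size */
	int wq_bytes = LPFC_WQE_EXP_COUNT * 128;	/* assumed WQE size */

	printf("CQ: %d KB -> %d pages\n", cq_bytes / 1024,
	       cq_bytes / LPFC_EXPANDED_PAGE_SIZE);	/* 64 KB -> 4 */
	printf("WQ: %d KB -> %d pages\n", wq_bytes / 1024,
	       wq_bytes / LPFC_EXPANDED_PAGE_SIZE);	/* 128 KB -> 8 */
	return 0;
}
```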
@@ -668,7 +672,6 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
- struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -769,7 +772,7 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
- uint32_t);
+ uint32_t, uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -820,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e0181371af09..c232bf0e8998 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.4"
+#define LPFC_DRIVER_VERSION "11.4.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index f5a36ccb8606..ba6503f37756 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.703.05.00-rc1"
-#define MEGASAS_RELDATE "October 5, 2017"
+#define MEGASAS_VERSION "07.704.04.00-rc1"
+#define MEGASAS_RELDATE "December 7, 2017"
/*
* Device IDs
@@ -197,6 +197,7 @@ enum MFI_CMD_OP {
MFI_CMD_ABORT = 0x6,
MFI_CMD_SMP = 0x7,
MFI_CMD_STP = 0x8,
+ MFI_CMD_NVME = 0x9,
MFI_CMD_OP_COUNT,
MFI_CMD_INVALID = 0xff
};
@@ -230,7 +231,7 @@ enum MFI_CMD_OP {
/*
* Global functions
*/
-extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id);
/*
@@ -1352,7 +1353,13 @@ struct megasas_ctrl_info {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u16 reserved:8;
+ u16 reserved:2;
+ u16 support_nvme_passthru:1;
+ u16 support_pl_debug_info:1;
+ u16 support_flash_comp_info:1;
+ u16 support_host_info:1;
+ u16 support_dual_fw_update:1;
+ u16 support_ssc_rev3:1;
u16 fw_swaps_bbu_vpd_info:1;
u16 support_pd_map_target_id:1;
u16 support_ses_ctrl_in_multipathcfg:1;
@@ -1377,7 +1384,19 @@ struct megasas_ctrl_info {
* provide the data in little endian order
*/
u16 fw_swaps_bbu_vpd_info:1;
- u16 reserved:8;
+ u16 support_ssc_rev3:1;
+ /* FW supports CacheCade 3.0, only one SSCD creation allowed */
+ u16 support_dual_fw_update:1;
+ /* FW supports dual firmware update feature */
+ u16 support_host_info:1;
+ /* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */
+ u16 support_flash_comp_info:1;
+ /* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */
+ u16 support_pl_debug_info:1;
+ /* FW supports retrieval of PL debug information through apps */
+ u16 support_nvme_passthru:1;
+ /* FW supports NVMe passthru commands */
+ u16 reserved:2;
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
@@ -1630,7 +1649,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:18;
+ u32 reserved:17;
+ u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
@@ -1660,7 +1680,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
u32 support_64bit_mode:1;
- u32 reserved:18;
+ u32 support_nvme_passthru:1;
+ u32 reserved:17;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2188,7 +2209,6 @@ struct megasas_instance {
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct mutex hba_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -2269,6 +2289,7 @@ struct megasas_instance {
u32 nvme_page_size;
u8 adapter_type;
bool consistent_mask_64bit;
+ bool support_nvme_passthru;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index cc54bdb5c712..2791141bd035 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -181,6 +181,7 @@ static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
+static bool support_nvme_encapsulation;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
@@ -1952,7 +1953,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
}
}
- mutex_lock(&instance->hba_mutex);
+ mutex_lock(&instance->reset_mutex);
/* Send DCMD to Firmware and cache the information */
if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
megasas_get_pd_info(instance, sdev);
@@ -1966,7 +1967,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->hba_mutex);
+ mutex_unlock(&instance->reset_mutex);
/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev);
@@ -3122,6 +3123,16 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
}
+static ssize_t
+megasas_fw_cmds_outstanding_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
+}
+
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -3132,6 +3143,8 @@ static DEVICE_ATTR(page_size, S_IRUGO,
megasas_page_size_show, NULL);
static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
megasas_ldio_outstanding_show, NULL);
+static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
+ megasas_fw_cmds_outstanding_show, NULL);
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
@@ -3139,6 +3152,7 @@ struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_state,
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
+ &dev_attr_fw_cmds_outstanding,
NULL,
};
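The new read-only attribute exposes instance->fw_outstanding alongside the existing ldio counter. A minimal user-space reader; the host0 path is illustrative only, the actual host number varies per system:

```c
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/scsi_host/host0/fw_cmds_outstanding",
			"r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("outstanding FW commands: %s", buf);
	fclose(f);
	return 0;
}
```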
@@ -3321,6 +3335,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ case MFI_CMD_NVME:
megasas_complete_int_cmd(instance, cmd);
break;
@@ -3331,10 +3346,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
+ status = cmd->frame->hdr.cmd_status;
instance->map_update_cmd = NULL;
- if (cmd->frame->hdr.cmd_status != 0) {
- if (cmd->frame->hdr.cmd_status !=
- MFI_STAT_NOT_FOUND)
+ if (status != MFI_STAT_OK) {
+ if (status != MFI_STAT_NOT_FOUND)
dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
@@ -3344,8 +3359,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags);
break;
}
- } else
- instance->map_id++;
+ }
+
megasas_return_cmd(instance, cmd);
/*
@@ -3353,10 +3368,14 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
* Validate Map will set proper value.
* Meanwhile all IOs will go as LD IO.
*/
- if (MR_ValidateMapInfo(instance))
+ if (status == MFI_STAT_OK &&
+ (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
+ instance->map_id++;
fusion->fast_path_io = 1;
- else
+ } else {
fusion->fast_path_io = 0;
+ }
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
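The completion path now validates the incoming map before publishing it: map_id is bumped only when MR_ValidateMapInfo() accepts map_id + 1, so a bad map leaves the current one in service. The same pattern reduced to a sketch with simplified, hypothetical names:

```c
struct inst { unsigned long long map_id; int fast_path_io; };
enum { STAT_OK = 0 };
int validate_map(struct inst *i, unsigned long long next_id);

static void map_sync_done(struct inst *i, int status)
{
	if (status == STAT_OK && validate_map(i, i->map_id + 1)) {
		i->map_id++;		/* publish only after validation */
		i->fast_path_io = 1;
	} else {
		i->fast_path_io = 0;	/* keep serving from the old map */
	}
}
```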
@@ -4677,10 +4696,12 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
sizeof(struct megasas_ctrl_info));
if ((instance->adapter_type != MFI_SERIES) &&
- !instance->mask_interrupts)
+ !instance->mask_interrupts) {
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
- else
+ } else {
ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
switch (ret) {
case DCMD_SUCCESS:
@@ -4702,6 +4723,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
ci->adapter_operations4.support_pd_map_target_id;
+ instance->support_nvme_passthru =
+ ci->adapter_operations4.support_nvme_passthru;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4718,6 +4741,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
instance->secure_jbod_support ? "Yes" : "No");
+ dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
+ instance->support_nvme_passthru ? "Yes" : "No");
break;
case DCMD_TIMEOUT:
@@ -5387,7 +5412,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
fusion->stream_detect_by_ld[i] =
- kmalloc(sizeof(struct LD_STREAM_DETECT),
+ kzalloc(sizeof(struct LD_STREAM_DETECT),
GFP_KERNEL);
if (!fusion->stream_detect_by_ld[i]) {
dev_err(&instance->pdev->dev,
@@ -5432,7 +5457,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
struct fusion_context *fusion = instance->ctrl_context;
- if (MR_ValidateMapInfo(instance))
+ if (MR_ValidateMapInfo(instance, instance->map_id))
fusion->fast_path_io = 1;
else
fusion->fast_path_io = 0;
@@ -5581,6 +5606,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd;
struct megasas_evt_log_info *el_info;
dma_addr_t el_info_h = 0;
+ int ret;
cmd = megasas_get_cmd(instance);
@@ -5613,26 +5639,29 @@ megasas_get_seq_num(struct megasas_instance *instance,
megasas_set_dma_settings(instance, dcmd, el_info_h,
sizeof(struct megasas_evt_log_info));
- if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
- DCMD_SUCCESS) {
- /*
- * Copy the data back into callers buffer
- */
- eli->newest_seq_num = el_info->newest_seq_num;
- eli->oldest_seq_num = el_info->oldest_seq_num;
- eli->clear_seq_num = el_info->clear_seq_num;
- eli->shutdown_seq_num = el_info->shutdown_seq_num;
- eli->boot_seq_num = el_info->boot_seq_num;
- } else
- dev_err(&instance->pdev->dev, "DCMD failed "
- "from %s\n", __func__);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+ if (ret != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ goto dcmd_failed;
+ }
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = el_info->newest_seq_num;
+ eli->oldest_seq_num = el_info->oldest_seq_num;
+ eli->clear_seq_num = el_info->clear_seq_num;
+ eli->shutdown_seq_num = el_info->shutdown_seq_num;
+ eli->boot_seq_num = el_info->boot_seq_num;
+
+dcmd_failed:
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
megasas_return_cmd(instance, cmd);
- return 0;
+ return ret;
}
/**
@@ -6346,7 +6375,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->hba_mutex);
mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -6704,6 +6732,7 @@ megasas_resume(struct pci_dev *pdev)
*/
atomic_set(&instance->fw_outstanding, 0);
+ atomic_set(&instance->ldio_outstanding, 0);
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
@@ -6822,7 +6851,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
u32 pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
- instance->unload = 1;
host = instance->host;
fusion = instance->ctrl_context;
@@ -6833,6 +6861,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+ instance->unload = 1;
if (megasas_wait_for_adapter_operational(instance))
goto skip_firing_dcmds;
@@ -7004,9 +7033,9 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
/**
* megasas_mgmt_poll - char node "poll" entry point
* */
-static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
poll_wait(file, &megasas_poll_wait, wait);
@@ -7087,7 +7116,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
- if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
+ ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
+ !instance->support_nvme_passthru)) {
dev_err(&instance->pdev->dev,
"Received invalid ioctl command 0x%x\n",
ioc->frame.hdr.cmd);
@@ -7301,9 +7332,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
struct megasas_iocpacket *ioc;
struct megasas_instance *instance;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
ioc = memdup_user(user_ioc, sizeof(*ioc));
if (IS_ERR(ioc))
@@ -7315,10 +7343,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- /* Adjust ioctl wait time for VF mode */
- if (instance->requestorId)
- wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
/* Block ioctls in VF mode */
if (instance->requestorId && !allow_vf_ioctls) {
error = -ENODEV;
@@ -7341,32 +7365,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- break;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting"
- "for controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance)) {
error = -ENODEV;
goto out_up;
}
- spin_unlock_irqrestore(&instance->hba_lock, flags);
error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
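The open-coded adprecovery polling loop is replaced with the existing megasas_wait_for_adapter_operational() helper, whose body lies outside this hunk. It presumably performs the same bounded poll; the sketch below shows the assumed behavior, not the actual implementation:

```c
/* Assumed shape of the helper: poll until the HBA reports
 * operational or the bounded wait expires; non-zero means timeout
 * and the caller maps it to -ENODEV.
 */
static int wait_operational_sketch(struct megasas_instance *instance,
				   int max_secs)
{
	int i;

	for (i = 0; i < max_secs; i++) {
		if (atomic_read(&instance->adprecovery) ==
		    MEGASAS_HBA_OPERATIONAL)
			return 0;
		msleep(1000);
	}
	return 1;
}
```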
@@ -7382,9 +7384,6 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
struct megasas_instance *instance;
struct megasas_aen aen;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
if (file->private_data != file) {
printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -7408,32 +7407,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
return -ENODEV;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock,
- flags);
- break;
- }
-
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting for"
- "controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance))
return -ENODEV;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
@@ -7613,6 +7588,14 @@ static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
}
static DRIVER_ATTR_RW(dbg_lvl);
+static ssize_t
+support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_nvme_encapsulation);
+}
+
+static DRIVER_ATTR_RO(support_nvme_encapsulation);
+
static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7801,6 +7784,7 @@ static int __init megasas_init(void)
support_poll_for_event = 2;
support_device_change = 1;
+ support_nvme_encapsulation = true;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -7850,8 +7834,17 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_support_device_change;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
+ if (rval)
+ goto err_dcf_support_nvme_encapsulation;
+
return rval;
+err_dcf_support_nvme_encapsulation:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
@@ -7884,6 +7877,8 @@ static void __exit megasas_exit(void)
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
pci_unregister_driver(&megasas_pci_driver);
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index bfad9bfc313f..59ecbb3b53b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
/*
* This function will Populate Driver Map using firmware raid map
*/
-void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
@@ -181,7 +181,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
struct MR_DRV_RAID_MAP_ALL *drv_map =
- fusion->ld_drv_map[(instance->map_id & 1)];
+ fusion->ld_drv_map[(map_id & 1)];
struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
void *raid_map_data = NULL;
@@ -190,7 +190,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
if (instance->max_raid_mapsize) {
- fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+ fw_map_dyn = fusion->ld_map[(map_id & 1)];
desc_table =
(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
if (desc_table != fw_map_dyn->raid_map_desc_table)
@@ -255,11 +255,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
} else if (instance->supportmax256vd) {
fw_map_ext =
- (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+ (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
- return;
+ return 1;
}
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
@@ -282,9 +282,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
} else {
fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
- fusion->ld_map[(instance->map_id & 1)];
+ fusion->ld_map[(map_id & 1)];
pFwRaidMap = &fw_map_old->raidMap;
ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+ if (ld_count > MAX_LOGICAL_DRIVES) {
+ dev_dbg(&instance->pdev->dev,
+ "LD count exposed in RAID map in not valid\n");
+ return 1;
+ }
+
pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
@@ -300,12 +306,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
sizeof(struct MR_DEV_HANDLE_INFO) *
MAX_RAIDMAP_PHYSICAL_DEVICES);
}
+
+ return 0;
}
/*
* This function will validate Map info data provided by FW
*/
-u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *drv_map;
@@ -317,11 +325,11 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
u16 ld;
u32 expected_size;
-
- MR_PopulateDrvRaidMap(instance);
+ if (MR_PopulateDrvRaidMap(instance, map_id))
+ return 0;
fusion = instance->ctrl_context;
- drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ drv_map = fusion->ld_drv_map[(map_id & 1)];
pDrvRaidMap = &drv_map->raidMap;
lbInfo = fusion->load_balance_info;
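Passing map_id through these helpers works because the driver double-buffers the RAID map and selects a copy by parity: map_id & 1 picks the buffer in use, so validating map_id + 1 operates on the inactive one. In sketch form:

```c
/* Two map buffers alternate by map_id parity; the "next" map never
 * aliases the one currently serving I/O.
 */
static struct MR_DRV_RAID_MAP_ALL *
drv_map_for(struct fusion_context *fusion, u64 map_id)
{
	return fusion->ld_drv_map[map_id & 1];	/* index 0 or 1 */
}
```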
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 65dc4fea6352..073ced07e662 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -983,7 +983,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
- struct timeval tv;
+ ktime_t time;
bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
@@ -1042,13 +1042,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
- do_gettimeofday(&tv);
+ time = ktime_get_real();
/* Convert to milliseconds as per FW requirement */
- IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
- (tv.tv_usec / 1000));
+ IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
init_frame = (struct megasas_init_frame *)cmd->frame;
- memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
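The struct timeval arithmetic collapses into a single ktime conversion; both forms yield milliseconds since the epoch, but the new one avoids the deprecated, y2038-unsafe interface. Side by side:

```c
/* Before: milliseconds assembled by hand from a struct timeval. */
struct timeval tv;
do_gettimeofday(&tv);
u64 ms_old = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

/* After: one 64-bit clock read, converted once. */
ktime_t time = ktime_get_real();
u64 ms_new = ktime_to_ms(time);
```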
@@ -1080,6 +1079,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+ drv_ops->mfi_capabilities.support_nvme_passthru = 1;
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1320,7 +1320,7 @@ megasas_get_map_info(struct megasas_instance *instance)
fusion->fast_path_io = 0;
if (!megasas_get_ld_map_info(instance)) {
- if (MR_ValidateMapInfo(instance)) {
+ if (MR_ValidateMapInfo(instance, instance->map_id)) {
fusion->fast_path_io = 1;
return 0;
}
@@ -1603,7 +1603,7 @@ static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
if (!cmd) {
dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
@@ -2664,16 +2664,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
if (instance->adapter_type == VENTURA_SERIES) {
- spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
- megasas_stream_detect(instance, cmd, &io_info);
- spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
- /* In ventura if stream detected for a read and it is read ahead
- * capable make this IO as LDIO
- */
- if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
- io_info.isRead && io_info.ra_capable)
- fp_possible = false;
-
/* FP for Optimal raid level 1.
* All large RAID-1 writes (> 32 KiB, both WT and WB modes)
* are built by the driver as LD I/Os.
@@ -2699,6 +2689,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
}
}
+ if (!fp_possible ||
+ (io_info.isRead && io_info.ra_capable)) {
+ spin_lock_irqsave(&instance->stream_lock,
+ spinlock_flags);
+ megasas_stream_detect(instance, cmd, &io_info);
+ spin_unlock_irqrestore(&instance->stream_lock,
+ spinlock_flags);
+ /* In Ventura, if a stream is detected for a read and it is
+ * read-ahead capable, make this I/O an LDIO.
+ */
+ if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+ fp_possible = false;
+ }
+
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
megasas_set_raidflag_cpu_affinity(praid_context,
@@ -3953,6 +3957,8 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u16 smid;
bool refire_cmd = 0;
+ u8 result;
+ u32 opcode = 0;
fusion = instance->ctrl_context;
@@ -3963,29 +3969,53 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
cmd_fusion = fusion->cmd_list[j];
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
smid = le16_to_cpu(cmd_mfi->context.smid);
+ result = REFIRE_CMD;
if (!smid)
continue;
- /* Do not refire shutdown command */
- if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
- MR_DCMD_CTRL_SHUTDOWN) {
- cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
- megasas_complete_cmd(instance, cmd_mfi, DID_OK);
- continue;
+ req_desc = megasas_get_request_descriptor(instance, smid - 1);
+
+ switch (cmd_mfi->frame->hdr.cmd) {
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
+ /* Do not refire shutdown command */
+ if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+ result = COMPLETE_CMD;
+ break;
+ }
+
+ refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) &&
+ (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+ !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+
+ if (!refire_cmd)
+ result = RETURN_CMD;
+
+ break;
+ case MFI_CMD_NVME:
+ if (!instance->support_nvme_passthru) {
+ cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+ result = COMPLETE_CMD;
+ }
+
+ break;
+ default:
+ break;
}
- req_desc = megasas_get_request_descriptor
- (instance, smid - 1);
- refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
- (cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
- && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
- if (refire_cmd)
+ switch (result) {
+ case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
- else
+ break;
+ case RETURN_CMD:
megasas_return_cmd(instance, cmd_mfi);
+ break;
+ case COMPLETE_CMD:
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ break;
+ }
}
}
@@ -4625,8 +4655,6 @@ transition_to_ready:
continue;
}
- megasas_refire_mgmt_cmd(instance);
-
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
@@ -4635,6 +4663,9 @@ transition_to_ready:
retval = FAILED;
goto out;
}
+
+ megasas_refire_mgmt_cmd(instance);
+
/* Reset load balance info */
if (fusion->load_balance_info)
memset(fusion->load_balance_info, 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 1814d79cb98d..8e5ebee6517f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1344,6 +1344,12 @@ union desc_value {
} u;
};
+enum CMD_RET_VALUES {
+ REFIRE_CMD = 1,
+ COMPLETE_CMD = 2,
+ RETURN_CMD = 3,
+};
+
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8027de465d47..13d6e4ec3022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -888,6 +888,22 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
return 1;
}
+static struct scsiio_tracker *
+_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsi_cmnd *cmd;
+
+ if (WARN_ON(!smid) ||
+ WARN_ON(smid >= ioc->hi_priority_smid))
+ return NULL;
+
+ cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (cmd)
+ return scsi_cmd_priv(cmd);
+
+ return NULL;
+}
+
/**
* _base_get_cb_idx - obtain the callback index
* @ioc: per adapter object
@@ -899,19 +915,25 @@ static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx;
+ u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ u8 cb_idx = 0xFF;
if (smid < ioc->hi_priority_smid) {
- i = smid - 1;
- cb_idx = ioc->scsi_lookup[i].cb_idx;
+ struct scsiio_tracker *st;
+
+ if (smid < ctl_smid) {
+ st = _get_st_from_smid(ioc, smid);
+ if (st)
+ cb_idx = st->cb_idx;
+ } else if (smid == ctl_smid)
+ cb_idx = ioc->ctl_cb_idx;
} else if (smid < ioc->internal_smid) {
i = smid - ioc->hi_priority_smid;
cb_idx = ioc->hpr_lookup[i].cb_idx;
} else if (smid <= ioc->hba_queue_depth) {
i = smid - ioc->internal_smid;
cb_idx = ioc->internal_lookup[i].cb_idx;
- } else
- cb_idx = 0xFF;
+ }
return cb_idx;
}
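SCSI I/O smids below ctl_smid now resolve through the block-layer tag, and the single smid at ctl_smid is the reserved ioctl slot (see the ctl path later in this patch). A worked example with assumed numbers; INTERNAL_SCSIIO_CMDS_COUNT is defined elsewhere in the driver, 3 here is purely illustrative:

```c
#include <stdio.h>

#define INTERNAL_SCSIIO_CMDS_COUNT 3	/* assumed for illustration */

int main(void)
{
	int scsiio_depth = 1000;	/* example HBA queue depth */
	int ctl_smid = scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;

	/* smids 1..997 come from block-layer tags; 998 is the ctl slot */
	printf("ctl_smid = %d\n", ctl_smid);
	return 0;
}
```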
@@ -1287,14 +1309,16 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
/**
* _base_get_chain_buffer_tracker - obtain chain tracker
* @ioc: per adapter object
- * @smid: smid associated to an IO request
+ * @scmd: SCSI commands of the IO request
*
* Returns chain tracker(from ioc->free_chain_list)
*/
static struct chain_tracker *
-_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
{
struct chain_tracker *chain_req;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1307,8 +1331,7 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
chain_req = list_entry(ioc->free_chain_list.next,
struct chain_tracker, tracker_list);
list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->scsi_lookup[smid - 1].chain_list);
+ list_add_tail(&chain_req->tracker_list, &st->chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return chain_req;
}
@@ -1923,7 +1946,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
/* initializing the chain flags and pointers */
chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -1963,7 +1986,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2066,7 +2089,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}
/* initializing the pointers */
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2097,7 +2120,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2742,7 +2765,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
+ return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
/**
@@ -2755,7 +2778,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
+ return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
/**
@@ -2822,26 +2845,15 @@ u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
- unsigned long flags;
- struct scsiio_tracker *request;
+ struct scsiio_tracker *request = scsi_cmd_priv(scmd);
+ unsigned int tag = scmd->request->tag;
u16 smid;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->free_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
- return 0;
- }
-
- request = list_entry(ioc->free_list.next,
- struct scsiio_tracker, tracker_list);
- request->scmd = scmd;
+ smid = tag + 1;
request->cb_idx = cb_idx;
- smid = request->smid;
request->msix_io = _base_get_msix_index(ioc);
- list_del(&request->tracker_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ request->smid = smid;
+ INIT_LIST_HEAD(&request->chain_list);
return smid;
}
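Allocation becomes trivial because the block layer already handed out a unique tag with the request: the smid is simply tag + 1, and the tracker lives in the command's private area rather than on a locked free list. Round-trip sketch, assuming a tagged scmd:

```c
/* smid <-> tag round trip:
 *   forward:  smid = scmd->request->tag + 1
 *   reverse:  scmd = scsi_host_find_tag(shost, smid - 1)
 * scsi_cmd_priv(scmd) returns the scsiio_tracker the midlayer
 * co-allocates with every scsi_cmnd.
 */
u16 smid = scmd->request->tag + 1;
struct scsiio_tracker *st = scsi_cmd_priv(scmd);

st->smid = smid;	/* the tracker now self-describes its slot */
```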
@@ -2874,6 +2886,35 @@ mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
return smid;
}
+static void
+_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
+{
+ /*
+ * See _wait_for_commands_to_complete() call with regards to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ if (ioc->pending_io_count == 0)
+ wake_up(&ioc->reset_wq);
+ }
+}
+
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st)
+{
+ if (WARN_ON(st->smid == 0))
+ return;
+ st->cb_idx = 0xFF;
+ st->direct_io = 0;
+ if (!list_empty(&st->chain_list)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ list_splice_init(&st->chain_list, &ioc->free_chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ }
+}
+
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
@@ -2886,37 +2927,22 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
unsigned long flags;
int i;
- struct chain_tracker *chain_req, *next;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (smid < ioc->hi_priority_smid) {
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add(&chain_req->tracker_list,
- &ioc->free_chain_list);
- }
- }
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ struct scsiio_tracker *st;
- /*
- * See _wait_for_commands_to_complete() call with regards
- * to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
+ st = _get_st_from_smid(ioc, smid);
+ if (!st) {
+ _base_recovery_check(ioc);
+ return;
}
+ mpt3sas_base_clear_st(ioc, st);
+ _base_recovery_check(ioc);
return;
- } else if (smid < ioc->internal_smid) {
+ }
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->internal_smid) {
/* hi-priority */
i = smid - ioc->hi_priority_smid;
ioc->hpr_lookup[i].cb_idx = 0xFF;
@@ -3789,13 +3815,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->pcie_sgl_dma_pool) {
for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
- pci_pool_free(ioc->pcie_sgl_dma_pool,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ dma_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
if (ioc->pcie_sgl_dma_pool)
- pci_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -3806,10 +3831,6 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- if (ioc->scsi_lookup) {
- free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
- ioc->scsi_lookup = NULL;
- }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
@@ -4110,16 +4131,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
- ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->scsi_lookup_pages);
- if (!ioc->scsi_lookup) {
- pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
- ioc->name, (int)sz);
- goto out;
- }
-
dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
ioc->name, ioc->request, ioc->scsiio_depth));
@@ -4202,23 +4213,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
nvme_blocks_needed++;
+ sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
+ ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
+ if (!ioc->pcie_sg_lookup) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ goto out;
+ }
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
- pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_create failed\n",
+ "PCIe SGL pool: dma_pool_create failed\n",
ioc->name);
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
- pci_pool_alloc(ioc->pcie_sgl_dma_pool,
- GFP_KERNEL,
- &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
- if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
+ ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
+ ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_alloc failed\n",
+ "PCIe SGL pool: dma_pool_alloc failed\n",
ioc->name);
goto out;
}
@@ -5766,19 +5783,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
kfree(delayed_event_ack);
}
- /* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- INIT_LIST_HEAD(&ioc->free_list);
- smid = 1;
- for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
- INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].smid = smid;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
- }
/* hi-priority queue */
INIT_LIST_HEAD(&ioc->hpr_free_list);
@@ -6292,15 +6297,13 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* _wait_for_commands_to_complete - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
*
- * This function waiting(3s) for all pending commands to complete
+ * This function waits 10s for all pending commands to complete
* prior to putting controller in reset.
*/
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
- unsigned long flags;
- u16 i;
ioc->pending_io_count = 0;
@@ -6309,11 +6312,7 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = 0; i < ioc->scsiio_depth; i++)
- if (ioc->scsi_lookup[i].cb_idx != 0xFF)
- ioc->pending_io_count++;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
if (!ioc->pending_io_count)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 60f42ca3954f..789bc421424b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -772,20 +772,17 @@ struct chain_tracker {
/**
* struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
- * @scmd: scsi request pointer
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
- * @tracker_list: list of free request (ioc->free_list)
+ * @chain_list: list of associated firmware chain tracker
* @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
- struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
- struct list_head tracker_list;
u16 msix_io;
};
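Dropping @scmd and @tracker_list is possible because trackers are no longer held in a driver-side array: the SCSI midlayer co-allocates one per command when the host template advertises its size, and scsi_cmd_priv() hands it back. A hedged sketch of that wiring (field subset only, not the driver's full template):

```c
/* How per-command private data is requested from the midlayer. */
static struct scsi_host_template sketch_template = {
	.name     = "example",
	.cmd_size = sizeof(struct scsiio_tracker),
	/* remaining ops elided */
};
```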
@@ -1248,10 +1245,8 @@ struct MPT3SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct scsiio_tracker *scsi_lookup;
- ulong scsi_lookup_pages;
+ struct pcie_sg_list *pcie_sg_lookup;
spinlock_t scsi_lookup_lock;
- struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -1270,6 +1265,7 @@ struct MPT3SAS_ADAPTER {
u16 chains_needed_per_io;
u32 chain_depth;
u16 chain_segment_sz;
+ u16 chains_per_prp_buffer;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -1401,7 +1397,9 @@ void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
- struct scsi_cmnd *scmd);
+ struct scsi_cmnd *scmd);
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st);
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1437,16 +1435,16 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
+struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
@@ -1613,14 +1611,9 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
struct _raid_device *raid_device);
-u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid);
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index b4c374b08e5e..9cddc3074cd1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -534,7 +534,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
* @wait -
*
*/
-static unsigned int
+static __poll_t
_ctl_poll(struct file *filep, poll_table *wait)
{
struct MPT3SAS_ADAPTER *ioc;
@@ -567,11 +567,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
- u16 i;
+ u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
struct MPT3SAS_DEVICE *priv_data;
- unsigned long flags;
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
@@ -587,11 +586,11 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
handle = le16_to_cpu(tm_request->DevHandle);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = ioc->scsiio_depth; i && !found; i--) {
- scmd = ioc->scsi_lookup[i - 1].scmd;
- if (scmd == NULL || scmd->device == NULL ||
- scmd->device->hostdata == NULL)
+ for (smid = ioc->scsiio_depth; smid && !found; smid--) {
+ struct scsiio_tracker *st;
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
continue;
if (lun != scmd->device->lun)
continue;
@@ -600,10 +599,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
continue;
if (priv_data->sas_target->handle != handle)
continue;
- tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ st = scsi_cmd_priv(scmd);
+ tm_request->TaskMID = cpu_to_le16(st->smid);
found = 1;
}
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
dctlprintk(ioc, pr_info(MPT3SAS_FMT
@@ -724,14 +723,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
goto out;
}
} else {
-
- smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
- if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- ret = -EAGAIN;
- goto out;
- }
+ /* Use first reserved smid for passthrough ioctls */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
}
ret = 0;
@@ -1081,8 +1074,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
mpt3sas_scsih_issue_locked_tm(ioc,
- le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b258f210120a..74fca184dba9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1445,156 +1445,31 @@ _scsih_is_nvme_device(u32 device_info)
}
/**
- * _scsih_scsi_lookup_get - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-static struct scsi_cmnd *
-_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].scmd;
-}
-
-/**
- * __scsih_scsi_lookup_get_clear - returns scmd entry without
- * holding any lock.
+ * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the scmd pointer stored for this smid, or NULL if the
* command is no longer outstanding.
*/
-static inline struct scsi_cmnd *
-__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
- u16 smid)
+struct scsi_cmnd *
+mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
+ struct scsiio_tracker *st;
- swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
-
- return scmd;
-}
-
-/**
- * _scsih_scsi_lookup_get_clear - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- * Then will derefrence the stored scmd pointer.
- */
-static inline struct scsi_cmnd *
-_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- unsigned long flags;
- struct scsi_cmnd *scmd;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- return scmd;
-}
+ if (smid > 0 &&
+ smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
+ u32 unique_tag = smid - 1;
-/**
- * _scsih_scsi_lookup_find_by_scmd - scmd lookup
- * @ioc: per adapter object
- * @smid: system request message index
- * @scmd: pointer to scsi command object
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a scmd pointer in the scsi_lookup array,
- * returning the revelent smid. A returned value of zero means invalid.
- */
-static u16
-_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
- *scmd)
-{
- u16 smid;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- smid = 0;
- for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd == scmd) {
- smid = ioc->scsi_lookup[i].smid;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_target - search for matching channel:id
- * @ioc: per adapter object
- * @id: target id
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
- int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel)) {
- found = 1;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
- * @ioc: per adapter object
- * @id: target id
- * @lun: lun number
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id:lun in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
- unsigned int lun, int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel &&
- ioc->scsi_lookup[i].scmd->device->lun == lun)) {
- found = 1;
- goto out;
+ scmd = scsi_host_find_tag(ioc->shost, unique_tag);
+ if (scmd) {
+ st = scsi_cmd_priv(scmd);
+ if (st->cb_idx == 0xFF)
+ scmd = NULL;
}
}
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
+ return scmd;
}
/**
@@ -2727,32 +2602,30 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
- * @device_handle: device handle
- * @channel: the channel assigned by the OS
- * @id: the id assigned by the OS
+ * @handle: device handle
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
+ * @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
* Context: user
*
* A generic API for sending task management requests to firmware.
*
* The callback index is set inside `ioc->tm_cb_idx`.
+ * The caller is responsible for checking for outstanding commands.
*
* Return SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
- uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
- struct scsiio_tracker *scsi_lookup = NULL;
int rc;
- u16 msix_task = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2791,9 +2664,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
return FAILED;
}
- if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
-
dtmprintk(ioc, pr_info(MPT3SAS_FMT
"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
ioc->name, handle, type, smid_task));
@@ -2809,11 +2679,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
- (scsi_lookup->msix_io < ioc->reply_queue_count))
- msix_task = scsi_lookup->msix_io;
- else
- msix_task = 0;
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2847,35 +2712,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
-
- switch (type) {
- case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- rc = SUCCESS;
- if (scsi_lookup->scmd == NULL)
- break;
- rc = FAILED;
- break;
-
- case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
- rc = SUCCESS;
- break;
- default:
- rc = FAILED;
- break;
- }
+ rc = SUCCESS;
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2884,13 +2721,13 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
- smid_task, timeout);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
+ msix_task, timeout);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
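/*
 * Editor's sketch (not part of the patch): with the reworked signature a
 * caller passes the per-command tracker's smid and MSI-X index directly
 * instead of channel/id, as scsih_abort() now does. "st" is assumed to
 * come from scsi_cmd_priv(scmd).
 */
static int example_abort_one_cmd(struct MPT3SAS_ADAPTER *ioc,
				 struct scsi_cmnd *scmd, u16 handle)
{
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);

	return mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30);
}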
@@ -2989,7 +2826,7 @@ scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- u16 smid;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
u16 handle;
int r;
@@ -3007,9 +2844,8 @@ scsih_abort(struct scsi_cmnd *scmd)
goto out;
}
- /* search for the command */
- smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
- if (!smid) {
+ /* check for completed command */
+ if (st == NULL || st->cb_idx == 0xFF) {
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -3027,10 +2863,12 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30);
+ /* Command must be cleared after abort */
+ if (r == SUCCESS && st->cb_idx != 0xFF)
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3086,10 +2924,11 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3148,10 +2987,11 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&starget->target_busy))
+ r = FAILED;
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -4600,16 +4440,18 @@ static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 smid;
- u16 count = 0;
+ int count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
count++;
_scsih_set_satl_pending(scmd, false);
- mpt3sas_base_free_smid(ioc, smid);
+ st = scsi_cmd_priv(scmd);
+ mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
scmd->result = DID_NO_CONNECT << 16;
@@ -4758,19 +4600,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4796,6 +4625,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
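/*
 * Editor's sketch: the helper the loop above relies on is presumably a
 * thin wrapper around test_and_set_bit(), so the SATL "pending" flag is
 * claimed atomically and the loop retries only when another thread wins
 * the race between the test_bit() check and the set.
 */
static bool example_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;

	if (pending)
		return test_and_set_bit(0, &priv->ata_command_pending);

	clear_bit(0, &priv->ata_command_pending);
	return false;
}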
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -4823,6 +4665,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4854,6 +4697,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
pcie_device = sas_target_priv_data->pcie_dev;
if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
@@ -4862,7 +4706,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
mpt3sas_setup_direct_io(ioc, scmd,
- raid_device, mpi_request, smid);
+ raid_device, mpi_request);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -5330,6 +5174,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 ioc_status;
u32 xfer_cnt;
u8 scsi_state;
@@ -5337,16 +5182,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
- unsigned long flags;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
- ioc->got_task_abort_from_ioctl)
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
- else
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
-
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL)
return 1;
@@ -5371,13 +5210,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (mpt3sas_scsi_direct_io_get(ioc, smid) &&
+ st = scsi_cmd_priv(scmd);
+ if (st->direct_io &&
((ioc_status & MPI2_IOCSTATUS_MASK)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->scsi_lookup[smid - 1].scmd = scmd;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- mpt3sas_scsi_direct_io_set(ioc, smid, 0);
+ st->direct_io = 0;
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5555,9 +5392,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
out:
scsi_dma_unmap(scmd);
-
+ mpt3sas_base_free_smid(ioc, smid);
scmd->scsi_done(scmd);
- return 1;
+ return 0;
}
/**
@@ -7211,7 +7048,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* Context: user.
*
*/
-static int
+static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
@@ -7221,7 +7058,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u8 link_rate, prev_link_rate;
unsigned long flags;
int rc;
- int requeue_event;
Mpi26EventDataPCIeTopologyChangeList_t *event_data =
(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
struct _pcie_device *pcie_device;
@@ -7231,12 +7067,12 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery)
- return 0;
+ return;
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
ioc->name));
- return 0;
+ return;
}
/* handle siblings events */
@@ -7244,10 +7080,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"ignoring switch event\n", ioc->name));
- return 0;
+ return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
- return 0;
+ return;
reason_code = event_data->PortEntry[i].PortStatus;
handle =
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
@@ -7316,7 +7152,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
}
}
- return requeue_event;
}
/**
@@ -7502,6 +7337,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
{
struct scsi_cmnd *scmd;
struct scsi_device *sdev;
+ struct scsiio_tracker *st;
u16 smid, handle;
u32 lun;
struct MPT3SAS_DEVICE *sas_device_priv_data;
@@ -7543,9 +7379,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
if (ioc->shost_recovery)
goto out;
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
+ st = scsi_cmd_priv(scmd);
sdev = scmd->device;
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
@@ -7567,8 +7404,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
+ st->msix_io, 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7607,10 +7445,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
- sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
- 30);
- if (r == FAILED) {
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
+ st->msix_io, 30);
+ if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
@@ -10416,6 +10254,7 @@ static struct scsi_host_template mpt2sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 2.0 HBA devices */
@@ -10454,6 +10293,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
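/*
 * Editor's sketch of why .cmd_size matters: once a host template sets it,
 * the SCSI midlayer allocates that many extra bytes behind every
 * struct scsi_cmnd and scsi_cmd_priv() returns a pointer to them, which
 * is what lets this series drop the driver-private scsi_lookup[] array.
 */
static inline struct scsiio_tracker *example_tracker(struct scsi_cmnd *scmd)
{
	/* scsi_cmd_priv() is simply the memory directly after the scmd */
	return scsi_cmd_priv(scmd);
}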
/* raid transport support for SAS 3.0 HBA devices */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index ced7d9f6274c..6bfcee4757e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -261,33 +261,6 @@ out_error:
}
/**
- * mpt3sas_scsi_direct_io_get - returns direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-inline u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].direct_io;
-}
-
-/**
- * mpt3sas_scsi_direct_io_set - sets direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- * @direct_io: Zero or non-zero value to set in the direct_io flag
- *
- * Returns Nothing.
- */
-inline void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
-{
- ioc->scsi_lookup[smid - 1].direct_io = direct_io;
-}
-
-/**
* mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
* @ioc: per adapter object
* @scmd: pointer to scsi command object
@@ -299,12 +272,12 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid)
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
u32 stripe_sz, stripe_exp;
u8 num_pds, cmd = scmd->cmnd[0];
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
if (cmd != READ_10 && cmd != WRITE_10 &&
cmd != READ_16 && cmd != WRITE_16)
@@ -340,5 +313,5 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
else
put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
- mpt3sas_scsi_direct_io_set(ioc, smid, 1);
+ st->direct_io = 1;
}
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e58be98430b0..201c8de1853d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5216,7 +5216,7 @@ static unsigned short pmcraid_get_minor(void)
{
int minor;
- minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
+ minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
__set_bit(minor, pmcraid_minor);
return minor;
}
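/*
 * Editor's note on the fix above: find_first_zero_bit() takes its size in
 * bits, while sizeof() yields bytes, so the old call scanned only one
 * eighth of the bitmap. A correct minimal pattern (assuming pmcraid_minor
 * is a DECLARE_BITMAP of PMCRAID_MAX_ADAPTERS bits):
 */
static DECLARE_BITMAP(example_minor, PMCRAID_MAX_ADAPTERS);

static unsigned short example_get_minor(void)
{
	/* search all PMCRAID_MAX_ADAPTERS bit positions, not byte count */
	return find_first_zero_bit(example_minor, PMCRAID_MAX_ADAPTERS);
}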
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 7be5823ab036..ee86a0c62dbf 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -724,6 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
cmd->SCp.phase++;
+ /* fall through */
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -733,6 +734,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
cmd->SCp.phase++;
+ /* fall through */
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
@@ -746,6 +748,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
+ /* fall through */
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
@@ -758,6 +761,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
cmd->SCp.phase++;
+ /* fall through */
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
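/*
 * Editor's note on the ppa.c hunks: a fall-through comment has no runtime
 * effect; it records that a case deliberately runs into the next one and
 * silences gcc's -Wimplicit-fallthrough, as in this sketch:
 */
static int example_engine_phases(int phase)
{
	int work = 0;

	switch (phase) {
	case 3:
		work++;			/* phase 3 work */
		/* fall through */
	case 4:
		work++;			/* phase 4 work */
		break;
	}
	return work;
}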
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 40800dd17d2f..ccd9a08ea030 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3126,6 +3126,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
if (!qedf->cmd_mgr) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
+ rc = -ENOMEM;
goto err5;
}
@@ -3149,6 +3150,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
create_workqueue(host_buf);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
@@ -3192,6 +3194,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 092e8f9a6020..667d7697ba01 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -198,7 +198,7 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
qedi_cmd = task->dd_data;
- qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
if (!qedi_cmd->tmf_resp_buf) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to allocate resp buf, cid=0x%x\n",
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index a0002232a83f..029e2e69b29f 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -997,7 +997,9 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
if (ret)
- continue;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Dropping CQE 0x%x for cid=0x%x.\n",
+ que->cq_cons_idx, cqe->cqe_common.conn_id);
que->cq_cons_idx++;
if (que->cq_cons_idx == QEDI_CQ_SIZE)
@@ -1269,16 +1271,14 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
- PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
return -ENOMEM;
}
- memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
/*
* Now populate PBL list with pages that contain pointers to the
@@ -1368,11 +1368,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1380,14 +1379,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq, 0,
- qedi->global_queues[i]->cq_mem_size);
-
- qedi->global_queues[i]->cq_pbl =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1395,8 +1390,6 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq_pbl, 0,
- qedi->global_queues[i]->cq_pbl_size);
/* Create PBL */
num_pages = qedi->global_queues[i]->cq_mem_size /
@@ -1457,25 +1450,22 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- memset(ep->sq, 0, ep->sq_mem_size);
-
- ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
rval = -ENOMEM;
goto out_free_sq;
}
- memset(ep->sq_pbl, 0, ep->sq_pbl_size);
/* Create PBL */
num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
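/*
 * Editor's sketch of the conversion pattern used throughout the qedi and
 * qla2xxx hunks in this patch: a dma_alloc_coherent() + memset() pair is
 * folded into dma_zalloc_coherent(), which returned zeroed coherent DMA
 * memory in kernels of this era.
 */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	/* one call replaces alloc + memset(buf, 0, size) */
	return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
}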
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9ce28c4f9812..89a4999fa631 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1843,14 +1843,13 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_reset_active(vha))
goto done;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
goto done;
}
- memset(stats, 0, sizeof(*stats));
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
@@ -2170,6 +2169,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
+ vfree(vha->scan.l);
+
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index e3ac7078d2aa..e2d5d3ca0f57 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1435,7 +1435,7 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
ha->optrom_state = QLA_SREADING;
}
- ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ ha->optrom_buffer = vzalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
@@ -1445,7 +1445,6 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
return -ENOMEM;
}
- memset(ha->optrom_buffer, 0, ha->optrom_region_size);
return 0;
}
@@ -2314,16 +2313,14 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
return -ENOMEM;
}
- memset(stats, 0, sizeof(*stats));
-
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
if (rval == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 01a9b8971e88..be7d6824581a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -246,8 +246,8 @@
* There is no correspondence between an N-PORT id and an AL_PA. Therefore the
* valid range of an N-PORT id is 0 through 0x7ef.
*/
-#define NPH_LAST_HANDLE 0x7ef
-#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
+#define NPH_LAST_HANDLE 0x7ee
+#define NPH_MGMT_SERVER 0x7ef /* FFFFEF */
#define NPH_SNS 0x7fc /* FFFFFC */
#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -288,6 +288,8 @@ struct name_list_extended {
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
#define FW_DEF_EXCHANGES_CNT 2048
+#define FW_MAX_EXCHANGES_CNT (32 * 1024)
+#define REDUCE_EXCHANGES_CNT (8 * 1024)
struct req_que;
struct qla_tgt_sess;
@@ -315,6 +317,29 @@ struct srb_cmd {
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
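/*
 * Editor's usage sketch for the relocated port_id_t: the union lets code
 * treat a 24-bit FC N_Port ID either as one integer or as its
 * domain/area/al_pa bytes, independent of host endianness.
 */
static inline uint32_t example_pack_port_id(uint8_t domain, uint8_t area,
					    uint8_t al_pa)
{
	port_id_t id = { .b = { .domain = domain, .area = area,
				.al_pa = al_pa } };

	return id.b24;	/* e.g. 0x010200 for domain 1, area 2, al_pa 0 */
}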
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
@@ -338,6 +363,7 @@ struct ct_arg {
u32 rsp_size;
void *req;
void *rsp;
+ port_id_t id;
};
/*
@@ -416,6 +442,7 @@ struct srb_iocb {
struct {
uint32_t cmd_hndl;
__le16 comp_status;
+ __le16 req_que_no;
struct completion comp;
} abt;
struct ct_arg ctarg;
@@ -448,6 +475,10 @@ struct srb_iocb {
uint32_t timeout_sec;
struct list_head entry;
} nvme;
+ struct {
+ u16 cmd;
+ u16 vp_index;
+ } ctrlvp;
} u;
struct timer_list timer;
@@ -476,6 +507,8 @@ struct srb_iocb {
#define SRB_NVME_CMD 19
#define SRB_NVME_LS 20
#define SRB_PRLI_CMD 21
+#define SRB_CTRL_VP 22
+#define SRB_PRLO_CMD 23
enum {
TYPE_SRB,
@@ -499,8 +532,12 @@ typedef struct srb {
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
+ int rc;
+ int retry_count;
+ struct completion comp;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2164,28 +2201,6 @@ struct imm_ntfy_from_isp {
#define REQUEST_ENTRY_SIZE (sizeof(request_t))
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
/*
* Switch info gathering structure.
@@ -2257,14 +2272,17 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
+ DSC_GNN_ID,
DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
DSC_GPDB,
+ DSC_GFPN_ID,
DSC_GPSC,
DSC_UPD_FCPORT,
DSC_LOGIN_COMPLETE,
+ DSC_ADISC,
DSC_DELETE_PEND,
};
@@ -2290,7 +2308,9 @@ enum fcport_mgt_event {
FCME_GPDB_DONE,
FCME_GPNID_DONE,
FCME_GFFID_DONE,
- FCME_DELETE_DONE,
+ FCME_ADISC_DONE,
+ FCME_GNNID_DONE,
+ FCME_GFPNID_DONE,
};
enum rscn_addr_format {
@@ -2315,6 +2335,7 @@ typedef struct fc_port {
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
+ unsigned int free_pending:1;
unsigned int local:1;
unsigned int logout_on_delete:1;
unsigned int logo_ack_needed:1;
@@ -2323,6 +2344,7 @@ typedef struct fc_port {
unsigned int login_pause:1;
unsigned int login_succ:1;
unsigned int query:1;
+ unsigned int id_changed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2434,6 +2456,7 @@ static const char * const port_state_str[] = {
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
+#define FCF_ASYNC_ACTIVE BIT_5
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2470,6 +2493,11 @@ static const char * const port_state_str[] = {
#define GA_NXT_REQ_SIZE (16 + 4)
#define GA_NXT_RSP_SIZE (16 + 620)
+#define GPN_FT_CMD 0x172
+#define GPN_FT_REQ_SIZE (16 + 4)
+#define GNN_FT_CMD 0x173
+#define GNN_FT_REQ_SIZE (16 + 4)
+
#define GID_PT_CMD 0x1A1
#define GID_PT_REQ_SIZE (16 + 4)
@@ -2725,6 +2753,13 @@ struct ct_sns_req {
} port_id;
struct {
+ uint8_t reserved;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t port_type;
+ } gpn_ft;
+
+ struct {
uint8_t port_type;
uint8_t domain;
uint8_t area;
@@ -2837,6 +2872,27 @@ struct ct_sns_gid_pt_data {
uint8_t port_id[3];
};
+/* It's the same for both GPN_FT and GNN_FT */
+struct ct_sns_gpnft_rsp {
+ struct {
+ struct ct_cmd_hdr header;
+ uint16_t response;
+ uint16_t residual;
+ uint8_t fragment_id;
+ uint8_t reason_code;
+ uint8_t explanation_code;
+ uint8_t vendor_unique;
+ };
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gpn_ft_data {
+ u8 control_byte;
+ u8 port_id[3];
+ u32 reserved;
+ u8 port_name[8];
+ } entries[1];
+};
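/*
 * Editor's sketch of walking a GPN_FT response: each 16-byte entry holds
 * a 24-bit port ID plus WWPN, and per FC-GS the top bit of control_byte
 * conventionally flags the final entry, so a scan loop might look like:
 */
static void example_walk_gpnft(struct ct_sns_gpnft_rsp *rsp, u32 max_entries)
{
	u32 i;

	for (i = 0; i < max_entries; i++) {
		/* consume rsp->entries[i].port_id / port_name here */
		if (rsp->entries[i].control_byte & 0x80)  /* last entry */
			break;
	}
}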
+
+/* CT command response */
struct ct_sns_rsp {
struct ct_rsp_hdr header;
@@ -2912,6 +2968,33 @@ struct ct_sns_pkt {
} p;
};
+struct ct_sns_gpnft_pkt {
+ union {
+ struct ct_sns_req req;
+ struct ct_sns_gpnft_rsp rsp;
+ } p;
+};
+
+enum scan_flags_t {
+ SF_SCANNING = BIT_0,
+ SF_QUEUED = BIT_1,
+};
+
+struct fab_scan_rp {
+ port_id_t id;
+ u8 port_name[8];
+ u8 node_name[8];
+};
+
+struct fab_scan {
+ struct fab_scan_rp *l;
+ u32 size;
+ u16 scan_retry;
+#define MAX_SCAN_RETRIES 5
+ enum scan_flags_t scan_flags;
+ struct delayed_work scan_work;
+};
+
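/*
 * Editor's sketch of how struct fab_scan is presumably driven (the init
 * code sits outside this excerpt): scan_work is a standard delayed_work,
 * so the owner would arm it roughly like this, with qla_scan_work_fn()
 * declared in qla_gbl.h elsewhere in this patch.
 */
static void example_start_scan(scsi_qla_host_t *vha)
{
	INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
	schedule_delayed_work(&vha->scan.scan_work, HZ / 10);
}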
/*
* SNS command structures -- for 2200 compatibility.
*/
@@ -3117,7 +3200,7 @@ enum qla_work_type {
QLA_EVT_AENFX,
QLA_EVT_GIDPN,
QLA_EVT_GPNID,
- QLA_EVT_GPNID_DONE,
+ QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
@@ -3125,6 +3208,15 @@ enum qla_work_type {
QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
+ QLA_EVT_RELOGIN,
+ QLA_EVT_ASYNC_PRLO,
+ QLA_EVT_ASYNC_PRLO_DONE,
+ QLA_EVT_GPNFT,
+ QLA_EVT_GPNFT_DONE,
+ QLA_EVT_GNNFT_DONE,
+ QLA_EVT_GNNID,
+ QLA_EVT_GFPNID,
+ QLA_EVT_SP_RETRY,
};
@@ -3166,7 +3258,9 @@ struct qla_work_evt {
struct {
port_id_t id;
u8 port_name[8];
+ u8 node_name[8];
void *pla;
+ u8 fc4_type;
} new_sess;
struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
fc_port_t *fcport;
@@ -3177,6 +3271,9 @@ struct qla_work_evt {
u8 iocb[IOCB_SIZE];
int type;
} nack;
+ struct {
+ u8 fc4_type;
+ } gpnft;
} u;
};
@@ -3433,10 +3530,6 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
-#define QLA_EARLY_LINKUP(_ha) \
- ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
- _ha->flags.fw_started && !_ha->flags.fw_init_done)
-
/*
* Qlogic host adapter specific data structure.
*/
@@ -3494,8 +3587,10 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
+ uint32_t rida_fmt2:1;
} flags;
+ uint16_t max_exchg;
uint16_t long_range_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3713,6 +3808,8 @@ struct qla_hw_data {
(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3795,7 +3892,7 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
- void *swl;
+ void *swl;
/* These are used by mailbox operations. */
uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -4107,6 +4204,7 @@ typedef struct scsi_qla_host {
#define LOOP_READY 5
#define LOOP_DEAD 6
+ unsigned long relogin_jif;
unsigned long dpc_flags;
#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
#define RESET_ACTIVE 1
@@ -4139,6 +4237,7 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
+#define IOCB_WORK_ACTIVE 31
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4252,6 +4351,8 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ struct list_head gpnid_list;
+ struct fab_scan scan;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4511,6 +4612,16 @@ struct sff_8247_a0 {
#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+#define SAVE_TOPO(_ha) { \
+ if (_ha->current_topology) \
+ _ha->prev_topology = _ha->current_topology; \
+}
+
+#define N2N_TOPO(ha) \
+ ((ha->prev_topology == ISP_CFG_N && !ha->current_topology) || \
+ ha->current_topology == ISP_CFG_N || \
+ !ha->current_topology)
+
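/*
 * Editor's sketch for the two macros above: SAVE_TOPO() snapshots the
 * last known topology before a reset clears current_topology, and
 * N2N_TOPO() then treats a still-unknown (zero) topology as potentially
 * point-to-point:
 */
static bool example_is_n2n(struct qla_hw_data *ha)
{
	/* true for explicit N2N, or while the topology is still unknown */
	return N2N_TOPO(ha);
}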
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index d231e7156134..0b190082aa8d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -127,21 +127,32 @@ static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
- struct qla_hw_data *ha = vha->hw;
+ uint16_t mb[MAX_IOCB_MB_REG];
+ int rc;
+
+ rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
+ if (rc != QLA_SUCCESS) {
+ seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
+ } else {
+ seq_puts(s, "FW Resource count\n\n");
+ seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
+ seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "MAX VP count[%d]\n", mb[11]);
+ seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
+ seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
+ mb[20]);
+ seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
+ mb[21]);
+ seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
+ mb[22]);
+ seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
+ mb[23]);
- seq_puts(s, "FW Resource count\n\n");
- seq_printf(s, "Original TGT exchg count[%d]\n",
- ha->orig_fw_tgt_xcb_count);
- seq_printf(s, "current TGT exchg count[%d]\n",
- ha->cur_fw_tgt_xcb_count);
- seq_printf(s, "original Initiator Exchange count[%d]\n",
- ha->orig_fw_xcb_count);
- seq_printf(s, "Current Initiator Exchange count[%d]\n",
- ha->cur_fw_xcb_count);
- seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
- seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
- seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
- seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+ }
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d5cef0727e72..5d8688e5bc7c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1392,7 +1392,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t port_name[8];
uint8_t node_name[8];
- uint32_t remote_nport_id;
+ uint8_t remote_nport_id[4];
uint32_t reserved_5;
} f2;
} u;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fa115c7433e5..e9295398050c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
@@ -104,11 +105,18 @@ int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
-int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
- void *);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
+ void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -148,6 +156,7 @@ extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
+extern int qla2xuseresexchforels;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -203,6 +212,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
/*
* Global Functions in qla_mid.c source file.
@@ -494,6 +504,7 @@ int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
/*
* Global Function Prototypes in qla_isr.c source file.
@@ -639,14 +650,26 @@ extern void qla2x00_free_fcport(fc_port_t *);
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
-void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gpnft(scsi_qla_host_t *, u8);
+void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
+void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
+int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
+void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
+void qla_scan_work_fn(struct work_struct *);
+
/*
* Global Function Prototypes in qla_attr.c source file.
*/
@@ -864,8 +887,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
struct fc_port *, enum qlt_plogi_link_t);
void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
-extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
-extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *);
extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index bc3db6abc9a0..5bf9a59432f6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -14,6 +14,10 @@ static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_rft_id(scsi_qla_host_t *);
static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
+static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
+static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
+static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
+static int qla_async_rsnn_nn(scsi_qla_host_t *);
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
@@ -175,6 +179,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
+ case CS_TIMEOUT:
+ rval = QLA_FUNCTION_TIMEOUT;
+ /* fall through */
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -508,6 +515,72 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
+static void qla2x00_async_sns_sp_done(void *s, int rc)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct ct_sns_pkt *ct_sns;
+ struct qla_work_evt *e;
+
+ sp->rc = rc;
+ if (rc == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s exiting normally.\n",
+ sp->name);
+ } else if (rc == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s timeout\n", sp->name);
+ } else {
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ sp->retry_count++;
+ if (sp->retry_count > 3)
+ goto err;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s fail rc %x. Retry count %d\n",
+ sp->name, rc, sp->retry_count);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
+ if (!e)
+ goto err2;
+
+ del_timer(&sp->u.iocb_cmd.timer);
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ return;
+ }
+
+err:
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+err2:
+ if (!e) {
+ /* Please ignore the kernel warning; skipping this free would leak memory. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return;
+ }
+
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+}
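/*
 * Editor's sketch of the retry path above: a failed SNS command is not
 * retried inline from completion context; the srb is bounced back to the
 * DPC thread as a QLA_EVT_SP_RETRY work item, and only once retry_count
 * exceeds three are the DMA buffers released via QLA_EVT_UNMAP.
 */
static void example_requeue_sp(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_work_evt *e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);

	if (e) {
		e->u.iosb.sp = sp;	/* DPC will re-issue this srb */
		qla2x00_post_work(vha, e);
	}
}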
+
/**
* qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
* @ha: HA context
@@ -517,57 +590,87 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_rft_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rft_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFT_ID_REQ_SIZE;
- arg.rsp_size = RFT_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rftid(vha, &vha->d_id);
+}
- /* Issue RFT_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rft_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
- RFT_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, FC-4 types */
ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
-
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
if (vha->flags.nvme_enabled)
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2044,
- "RFT_ID exiting normally.\n");
+ goto done_free_sp;
}
-
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x.\n",
+ sp->name, sp->handle, d_id->b24);
+ return rval;
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -579,12 +682,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
int
qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -592,47 +690,81 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFF_ID_REQ_SIZE;
- arg.rsp_size = RFF_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
+ FC4_TYPE_FCP_SCSI);
+}
- /* Issue RFF_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 fc4feature, u8 fc4type)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
- RFF_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
- /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
- ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
- ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
- ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rff_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- qlt_rff_id(vha, ct_req);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
- ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
+ ct_req->req.rff_id.port_id[0] = d_id->b.domain;
+ ct_req->req.rff_id.port_id[1] = d_id->b.area;
+ ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
+ ct_req->req.rff_id.fc4_feature = fc4feature;
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2047,
"RFF_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2048,
- "RFF_ID exiting normally.\n");
+ goto done_free_sp;
}
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
+ sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -644,54 +776,85 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
int
qla2x00_rnn_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rnn_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RNN_ID_REQ_SIZE;
- arg.rsp_size = RNN_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
+}
- /* Issue RNN_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 *node_name)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rnid";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, node_name */
ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
-
memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x204d,
"RNN_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204e,
- "RNN_ID exiting normally.\n");
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x\n",
+ sp->name, sp->handle, d_id->b24);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
void
@@ -718,12 +881,7 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
int
qla2x00_rsnn_nn(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -731,22 +889,49 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = 0;
- arg.rsp_size = RSNN_NN_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rsnn_nn(vha);
+}
- /* Issue RSNN_NN */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rsnn_nn";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
- RSNN_NN_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
/* Prepare CT arguments -- node_name, symbolic node_name, size */
memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
@@ -754,32 +939,33 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
/* Prepare the Symbolic Node Name */
qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
sizeof(ct_req->req.rsnn_nn.sym_node_name));
-
- /* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
(uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
- /* Update MS IOCB request */
- ms_pkt->req_bytecount =
- cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2051,
- "RSNN_NN issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2052,
- "RSNN_NN exiting normally.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x2051,
+ "RSNN_NN issue IOCB failed (%d).\n", rval);
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x.\n",
+ sp->name, sp->handle);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -2790,15 +2976,20 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC login state %d\n",
- __func__, fcport->port_name, fcport->fw_login_state);
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->sp->gen2 != fcport->login_gen) {
/* PLOGI/PRLI/LOGO came in while cmd was out.*/
ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ "%s %8phC generation changed rscn %d|%d n",
__func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+ fcport->rscn_gen);
return;
}
@@ -2811,7 +3002,21 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable plugged into the same place */
switch (vha->host->active_mode) {
case MODE_TARGET:
- /* NOOP. let the other guy login to us.*/
+ if (fcport->fw_login_state ==
+ DSC_LS_PRLI_COMP) {
+ u16 data[2];
+ /*
+ * Late RSCN was delivered.
+ * Remote port has already logged in.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "%s %d %8phC post adisc\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ }
break;
case MODE_INITIATOR:
case MODE_DUAL:
@@ -2820,24 +3025,29 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post %s\n", __func__,
__LINE__, fcport->port_name,
(atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "gpdb" : "gnl");
+ FCS_ONLINE) ? "adisc" : "gnl");
if (atomic_read(&fcport->state) ==
- FCS_ONLINE)
- qla24xx_post_gpdb_work(vha,
- fcport, PDO_FORCE_ADISC);
- else
+ FCS_ONLINE) {
+ u16 data[2];
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ } else {
qla24xx_post_gnl_work(vha,
fcport);
+ }
break;
}
} else { /* fcport->d_id.b24 != ea->id.b24 */
fcport->d_id.b24 = ea->id.b24;
- if (fcport->deleted == QLA_SESS_DELETED) {
+ fcport->id_changed = 1;
+ if (fcport->deleted != QLA_SESS_DELETED) {
ql_dbg(ql_dbg_disc, vha, 0x2021,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
}
} else { /* ea->sp->gen1 != fcport->rscn_gen */
@@ -2854,7 +3064,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2042,
"%s %d %8phC post del sess\n", __func__,
__LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2045,
"%s %d %8phC login\n", __func__, __LINE__,
@@ -2878,7 +3088,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
struct event_arg ea;
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
@@ -2889,9 +3099,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GIDPN_DONE;
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s res %x, WWPN %8phC ID %3phC \n",
- sp->name, res, fcport->port_name, id);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s WWPN %8phC timed out.\n",
+ sp->name, fcport->port_name);
+ qla24xx_post_gidpn_work(sp->vha, fcport);
+ sp->free(sp);
+ return;
+ } else if (res) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s fail res %x, WWPN %8phC\n",
+ sp->name, res, fcport->port_name);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s good WWPN %8phC ID %3phC\n",
+ sp->name, fcport->port_name, id);
+ }
qla2x00_fcport_event_handler(vha, &ea);
@@ -2904,16 +3127,16 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GID_PN;
fcport->scan_state = QLA_FCPORT_SCAN;
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gidpn";
sp->gen1 = fcport->rscn_gen;
@@ -2954,8 +3177,8 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -2974,6 +3197,7 @@ int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -2986,9 +3210,39 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
+}
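The gen1/gen2 test above is the staleness check used throughout these handlers: the srb snapshots the port's rscn_gen and login_gen at issue time, and the completion path only consumes the result if neither counter has moved. A minimal sketch of that idea (hypothetical names, not the driver's structures):

#include <stdio.h>

struct port {
	unsigned rscn_gen;	/* bumped on each RSCN for this port */
	unsigned login_gen;	/* bumped on each login-state change */
};

struct cmd {
	unsigned gen1, gen2;	/* snapshots taken at issue time */
};

static void issue(struct cmd *c, const struct port *p)
{
	c->gen1 = p->rscn_gen;
	c->gen2 = p->login_gen;
}

/* Returns 0 if the result may be consumed, nonzero if it is stale. */
static int complete(const struct cmd *c, const struct port *p)
{
	if (c->gen2 != p->login_gen)
		return 1;	/* login state changed underneath us: drop */
	if (c->gen1 != p->rscn_gen)
		return 2;	/* a new RSCN arrived: requery instead */
	return 0;
}

int main(void)
{
	struct port p = { 3, 7 };
	struct cmd c;

	issue(&c, &p);
	p.rscn_gen++;		/* an RSCN races with the in-flight command */
	printf("stale=%d\n", complete(&c, &p));	/* prints stale=2 */
	return 0;
}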
+
static void qla24xx_async_gpsc_sp_done(void *s, int res)
{
struct srb *sp = s;
@@ -3004,7 +3258,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3055,6 +3309,7 @@ done:
ea.event = FCME_GPSC_DONE;
ea.rc = res;
ea.fcport = fcport;
+ ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
sp->free(sp);
@@ -3066,14 +3321,14 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
@@ -3113,8 +3368,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -3133,7 +3388,7 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
return qla2x00_post_work(vha, e);
}
-void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
@@ -3155,43 +3410,137 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport;
- unsigned long flags;
+ fc_port_t *fcport, *conflict, *t;
+ u16 data[2];
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d port_id: %06x\n",
+ __func__, __LINE__, ea->id.b24);
- if (fcport) {
- /* cable moved. just plugged in */
- fcport->rscn_gen++;
- fcport->d_id = ea->id;
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0x2064,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- break;
+ if (ea->rc) {
+ /* cable is disconnected */
+ list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
+ if (fcport->d_id.b24 == ea->id.b24) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->disc_state);
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ }
}
} else {
- /* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0x2065,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
+ /* cable is connected */
+ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+ if (fcport) {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if ((conflict->d_id.b24 == ea->id.b24) &&
+ (fcport != conflict)) {
+ /* Two fcports share the same Nport ID,
+ * or an existing fcport's Nport ID
+ * conflicts with the new fcport.
+ */
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
+
+ fcport->rscn_gen++;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ switch (fcport->disc_state) {
+ case DSC_LOGIN_COMPLETE:
+ /* recheck session is still intact. */
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC revalidate session with ADISC\n",
+ __func__, __LINE__, fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ fcport->d_id = ea->id;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ fcport->d_id = ea->id;
+ break;
+ default:
+ fcport->d_id = ea->id;
+ break;
+ }
+ } else {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if (conflict->d_id.b24 == ea->id.b24) {
+ /* Two fcports share the same Nport ID,
+ * or an existing fcport's Nport ID
+ * conflicts with the new fcport.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
- qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+ /* create new fcport */
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
+ qla24xx_post_newsess_work(vha, &ea->id,
+ ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ }
}
}
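Both branches of the handler run the same conflict sweep: any other session already holding the reported Nport ID is scheduled for deletion before the ID is taken over. A compact model of that sweep, with an array standing in for the vp_fcports list (hypothetical names):

#include <stdio.h>

#define NPORTS 3

struct fcport {
	unsigned id;	/* 24-bit Nport ID */
	int deleted;
};

/* Schedule every *other* port that holds `id` for deletion. */
static void sweep_conflicts(struct fcport *ports, int n,
			    struct fcport *keep, unsigned id)
{
	for (int i = 0; i < n; i++) {
		if (&ports[i] != keep && ports[i].id == id)
			ports[i].deleted = 1; /* ~ qlt_schedule_sess_for_deletion() */
	}
}

int main(void)
{
	struct fcport ports[NPORTS] = {
		{ 0x010203, 0 }, { 0x010203, 0 }, { 0x0a0b0c, 0 }
	};

	sweep_conflicts(ports, NPORTS, &ports[0], 0x010203);
	for (int i = 0; i < NPORTS; i++)
		printf("port %d deleted=%d\n", i, ports[i].deleted);
	return 0;
}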
@@ -3205,11 +3554,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
struct event_arg ea;
struct qla_work_evt *e;
+ unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s res %x ID %3phC. %8phC\n",
- sp->name, res, ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
+ if (res)
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
+ sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
+ sp->name, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3220,9 +3576,26 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GPNID_DONE;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_del(&sp->elem);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (res) {
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+ } else if (sp->gen1) {
+ /* There was another RSCN for this Nport ID */
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+
qla2x00_fcport_event_handler(vha, &ea);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
/* please ignore kernel warning. otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
@@ -3253,8 +3626,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
- srb_t *sp;
+ srb_t *sp, *tsp;
struct ct_sns_pkt *ct_sns;
+ unsigned long flags;
if (!vha->flags.online)
goto done;
@@ -3265,8 +3639,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpnid";
+ sp->u.iocb_cmd.u.ctarg.id = *id;
+ sp->gen1 = 0;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(tsp, &vha->gpnid_list, elem) {
+ if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
+ tsp->gen1++;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ sp->free(sp);
+ goto done;
+ }
+ }
+ list_add_tail(&sp->elem, &vha->gpnid_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
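The gpnid_list bookkeeping added in this hunk coalesces concurrent GPN_ID queries for one Nport ID: a newcomer that finds an in-flight entry bumps that entry's gen1 and frees itself, and the completion handler re-issues the query once if gen1 is nonzero. A userspace sketch of the coalescing (hypothetical names; the real list is protected by sess_lock):

#include <stdio.h>

#define MAX_INFLIGHT 4

struct query {
	unsigned id;
	unsigned gen1;	/* duplicates absorbed while in flight */
	int used;
};

static struct query inflight[MAX_INFLIGHT];

/* Returns 1 if a new query must actually be sent, 0 if it was coalesced. */
static int submit(unsigned id)
{
	for (int i = 0; i < MAX_INFLIGHT; i++) {
		if (inflight[i].used && inflight[i].id == id) {
			inflight[i].gen1++;	/* fold into in-flight query */
			return 0;
		}
	}
	for (int i = 0; i < MAX_INFLIGHT; i++) {
		if (!inflight[i].used) {
			inflight[i] = (struct query){ id, 0, 1 };
			return 1;
		}
	}
	return 0;	/* table full; the real driver has no such limit */
}

/* Returns 1 if the query must be re-issued because duplicates arrived. */
static int complete(int slot)
{
	int again = inflight[slot].gen1 > 0;

	inflight[slot].used = 0;
	return again;
}

int main(void)
{
	printf("sent=%d\n", submit(0x010203));	/* sent=1 */
	printf("sent=%d\n", submit(0x010203));	/* sent=0, coalesced */
	printf("reissue=%d\n", complete(0));	/* reissue=1 */
	return 0;
}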
@@ -3393,7 +3781,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -3441,3 +3829,720 @@ done_free_sp:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
+
+/* GPN_FT + GNN_FT */
+static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ u64 twwn;
+ int rc = 0;
+
+ if (!ha->num_vhosts)
+ return 0;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ twwn = wwn_to_u64(vp->port_name);
+ if (wwn == twwn) {
+ rc = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return rc;
+}
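wwn_to_u64(), used above to compare the scanned WWN against each vport, is in effect a big-endian 8-byte load, so WWNs can be compared as plain integers. An equivalent standalone helper:

#include <stdio.h>
#include <stdint.h>

/* Big-endian 8-byte load, equivalent to the kernel's wwn_to_u64(). */
static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	uint8_t a[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x7f, 0xa2, 0xa0 };
	uint8_t b[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x7f, 0xa2, 0xa1 };

	printf("a=%016llx b=%016llx a<b=%d\n",
	       (unsigned long long)wwn_to_u64(a),
	       (unsigned long long)wwn_to_u64(b),
	       wwn_to_u64(a) < wwn_to_u64(b));
	return 0;
}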
+
+void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ fc_port_t *fcport;
+ u32 i, rc;
+ bool found;
+ u8 fc4type = sp->gen2;
+ struct fab_scan_rp *rp;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+
+ if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s scan stop due to chip reset %x/%x\n",
+ sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
+ goto out;
+ }
+
+ rc = sp->rc;
+ if (rc) {
+ vha->scan.scan_retry++;
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Fabric scan failed on all retries.\n");
+ }
+ goto out;
+ }
+ vha->scan.scan_retry = 0;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ u64 wwn;
+
+ rp = &vha->scan.l[i];
+ found = false;
+
+ wwn = wwn_to_u64(rp->port_name);
+ if (wwn == 0)
+ continue;
+
+ if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
+ continue;
+
+ /* Bypass reserved domain fields. */
+ if ((rp->id.b.domain & 0xf0) == 0xf0)
+ continue;
+
+ /* Bypass virtual ports of the same host. */
+ if (qla2x00_is_a_vp(vha, wwn))
+ continue;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
+ continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->d_id.b24 = rp->id.b24;
+ found = true;
+ /*
+ * If the device was not a fabric device before,
+ * clear its stale loop ID.
+ */
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ qla2x00_clear_loop_id(fcport);
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ }
+ break;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, rp->port_name);
+ qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
+ rp->node_name, NULL, fc4type);
+ }
+ }
+
+ /*
+ * Logout all previous fabric dev marked lost, except FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ continue;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ } else
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
+out:
+ qla24xx_sp_unmap(vha, sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
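qla24xx_async_gnnft_done() is a mark-and-sweep pass: every fabric port is first marked QLA_FCPORT_SCAN, entries present in the scan results flip back to QLA_FCPORT_FOUND, and whatever is still marked afterwards is logged out (FCP2 devices excepted). A minimal model of the reconciliation (hypothetical names):

#include <stdio.h>
#include <string.h>

enum scan_state { SCAN, FOUND };

struct fcport {
	char wwpn[9];
	enum scan_state scan_state;
	int lost;
};

static void reconcile(struct fcport *ports, int n,
		      const char *seen[], int nseen)
{
	for (int i = 0; i < n; i++)
		ports[i].scan_state = SCAN;		/* mark */

	for (int s = 0; s < nseen; s++)			/* flip matches */
		for (int i = 0; i < n; i++)
			if (!strcmp(ports[i].wwpn, seen[s]))
				ports[i].scan_state = FOUND;

	for (int i = 0; i < n; i++)			/* sweep the rest */
		if (ports[i].scan_state == SCAN)
			ports[i].lost = 1; /* ~ mark lost + schedule deletion */
}

int main(void)
{
	struct fcport ports[2] = { { "wwpn-a", SCAN, 0 }, { "wwpn-b", SCAN, 0 } };
	const char *seen[] = { "wwpn-a" };

	reconcile(ports, 2, seen, 1);
	printf("a lost=%d, b lost=%d\n", ports[0].lost, ports[1].lost);
	return 0;
}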
+
+static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_work_evt *e;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_gpnft_rsp *ct_rsp =
+ (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct ct_sns_gpn_ft_data *d;
+ struct fab_scan_rp *rp;
+ int i, j, k;
+ u16 cmd = be16_to_cpu(ct_req->command);
+
+ /* gen2 field is holding the fc4type */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x FC4Type %x\n",
+ sp->name, res, sp->gen2);
+
+ if (res) {
+ unsigned long flags;
+
+ sp->free(sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s rescan failed on all retries\n",
+ sp->name);
+ }
+ return;
+ }
+
+ if (!res) {
+ port_id_t id;
+ u64 wwn;
+
+ j = 0;
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ d = &ct_rsp->entries[i];
+
+ id.b.rsvd_1 = 0;
+ id.b.domain = d->port_id[0];
+ id.b.area = d->port_id[1];
+ id.b.al_pa = d->port_id[2];
+ wwn = wwn_to_u64(d->port_name);
+
+ if (id.b24 == 0 || wwn == 0)
+ continue;
+
+ if (cmd == GPN_FT_CMD) {
+ rp = &vha->scan.l[j];
+ rp->id = id;
+ memcpy(rp->port_name, d->port_name, 8);
+ j++;
+ } else { /* GNN_FT_CMD */
+ for (k = 0; k < vha->hw->max_fibre_devices;
+ k++) {
+ rp = &vha->scan.l[k];
+ if (id.b24 == rp->id.b24) {
+ memcpy(rp->node_name,
+ d->port_name, 8);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (cmd == GPN_FT_CMD)
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
+ else
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
+ if (!e) {
+ /* Please ignore the kernel warning; freeing here avoids a memory leak. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s unable to alloc work element\n",
+ sp->name);
+ sp->free(sp);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp->rc = res;
+ e->u.iosb.sp = sp;
+
+ qla2x00_post_work(vha, e);
+}
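Each GPN_FT entry carries the Nport ID as three big-endian bytes (domain, area, AL_PA), which the handler packs into the 24-bit b24 value used for comparisons. A worked sketch of that packing (the driver uses a port_id_t union rather than an explicit shift, so this is illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Pack domain/area/al_pa into a 24-bit port id, as port_id_t.b24 holds. */
static uint32_t port_id_b24(const uint8_t id[3])
{
	return ((uint32_t)id[0] << 16) | ((uint32_t)id[1] << 8) | id[2];
}

int main(void)
{
	uint8_t id[3] = { 0x01, 0x02, 0x03 };	/* domain, area, AL_PA */

	printf("b24=%06x\n", port_id_b24(id));	/* prints b24=010203 */
	return 0;
}

The same packed value is then used as the join key when GNN_FT node names are merged into the scan list built by GPN_FT.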
+
+/*
+ * Get WWNN list for fc4_type
+ *
+ * The same SRB is assumed to be re-used from the preceding GPN_FT
+ * command, avoiding an extra buffer free and re-allocation.
+ */
+static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
+ u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: req %p rsp %p are not setup\n",
+ __func__, sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.rsp);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ WARN_ON(1);
+ goto done_free_sp;
+ }
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
+ sp->u.iocb_cmd.u.ctarg.rsp_size);
+
+ /* GNN_FT req (re-uses the gpn_ft request layout) */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+} /* GNNFT */
+
+void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+ del_timer(&sp->u.iocb_cmd.timer);
+ qla24xx_async_gnnft(vha, sp, sp->gen2);
+}
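As the comment before qla24xx_async_gnnft() notes, the GNN_FT pass re-arms the GPN_FT srb in place: both DMA buffers are memset and the CT IU rebuilt, avoiding a free/re-allocate cycle between the two fabric queries. A toy illustration of re-arming one buffer for a second command (hypothetical names):

#include <stdio.h>
#include <string.h>

struct buf { char data[32]; };

/* Re-arm a buffer for the follow-up command instead of free()+malloc(),
 * mirroring how the GNN_FT pass reuses the GPN_FT srb and DMA buffers. */
static void rearm(struct buf *b, const char *cmd)
{
	memset(b, 0, sizeof(*b));	/* ~ memset of ctarg.req/ctarg.rsp */
	snprintf(b->data, sizeof(b->data), "%s", cmd);
}

int main(void)
{
	struct buf b;

	rearm(&b, "GPN_FT");
	printf("pass 1: %s\n", b.data);
	rearm(&b, "GNN_FT");		/* same storage, new CT IU */
	printf("pass 2: %s\n", b.data);
	return 0;
}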
+
+/* Get WWPN list for certain fc4_type */
+int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+ u32 rspsz;
+ unsigned long flags;
+
+ if (!vha->flags.online)
+ return rval;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags & SF_SCANNING) {
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ return rval;
+ }
+ vha->scan.scan_flags |= SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ return rval;
+ }
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ rspsz = sizeof(struct ct_sns_gpnft_rsp) +
+ ((vha->hw->max_fibre_devices - 1) *
+ sizeof(struct ct_sns_gpn_ft_data));
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ memset(vha->scan.l, 0, vha->scan.size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
+
+ /* GPN_FT req */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+}
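The rspsz computation sizes the response for max_fibre_devices entries; because struct ct_sns_gpnft_rsp already embeds one ct_sns_gpn_ft_data, only (max - 1) extra entries are added. A worked example with made-up struct sizes (the real sizes live in the driver headers):

#include <stdio.h>

/* Hypothetical sizes; the real structs are defined in the driver headers. */
#define GPNFT_RSP_HDR_AND_ONE_ENTRY 32u	/* sizeof(struct ct_sns_gpnft_rsp) */
#define GPNFT_ENTRY_SIZE 16u		/* sizeof(struct ct_sns_gpn_ft_data) */

int main(void)
{
	unsigned max_fibre_devices = 512;
	unsigned rspsz = GPNFT_RSP_HDR_AND_ONE_ENTRY +
			 (max_fibre_devices - 1) * GPNFT_ENTRY_SIZE;

	/* 32 + 511 * 16 = 8208 bytes for 512 entries */
	printf("rspsz=%u\n", rspsz);
	return 0;
}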
+
+void qla_scan_work_fn(struct work_struct *work)
+{
+ struct fab_scan *s = container_of(to_delayed_work(work),
+ struct fab_scan, scan_work);
+ struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
+ scan);
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule loop resync\n", __func__);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_QUEUED;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
+/* GNN_ID */
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ qla24xx_post_gnl_work(vha, ea->fcport);
+}
+
+static void qla2x00_async_gnnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
+ struct event_arg ea;
+ u64 wwnn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwnn = wwn_to_u64(node_name);
+ if (wwnn)
+ memcpy(fcport->node_name, node_name, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GNNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->node_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GNN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
+ GNN_ID_RSP_SIZE);
+
+ /* GNN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gnnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+/* GPFN_ID */
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_gpsc_work(vha, fcport);
+}
+
+static void qla2x00_async_gfpnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
+ struct event_arg ea;
+ u64 wwn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwn = wwn_to_u64(fpn);
+ if (wwn)
+ memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GFPNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->fabric_port_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GFPN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gfpnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
+ GFPN_ID_RSP_SIZE);
+
+ /* GFPN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gfpnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1bafa043f9f1..aececf664654 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -41,6 +41,7 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
struct event_arg *);
+static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -58,7 +59,8 @@ qla2x00_sp_timeout(struct timer_list *t)
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- sp->free(sp);
+ if (sp->type != SRB_ELS_DCMD)
+ sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
@@ -102,14 +104,21 @@ qla2x00_async_iocb_timeout(void *data)
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ if (fcport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ } else {
+ pr_info("Async-%s timeout - hdl=%x.\n",
+ sp->name, sp->handle);
+ }
switch (sp->type) {
case SRB_LOGIN_CMD:
+ if (!fcport)
+ break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
@@ -123,6 +132,8 @@ qla2x00_async_iocb_timeout(void *data)
qla24xx_handle_plogi_done_event(fcport->vha, &ea);
break;
case SRB_LOGOUT_CMD:
+ if (!fcport)
+ break;
qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
break;
case SRB_CT_PTHRU_CMD:
@@ -130,6 +141,7 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PLOGI:
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
+ case SRB_CTRL_VP:
sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
}
@@ -146,7 +158,8 @@ qla2x00_async_login_sp_done(void *ptr, int res)
ql_dbg(ql_dbg_disc, vha, 0x20dd,
"%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
memset(&ea, 0, sizeof(ea));
ea.event = FCME_PLOGI_DONE;
@@ -173,11 +186,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (!vha->flags.online)
goto done;
- if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
- (fcport->fw_login_state == DSC_LS_PRLI_PEND))
- goto done;
-
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -185,8 +193,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->flags |= FCF_ASYNC_SENT;
fcport->logout_completed = 0;
+ fcport->disc_state = DSC_LOGIN_PEND;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
@@ -201,7 +212,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -216,8 +226,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -227,7 +237,7 @@ qla2x00_async_logout_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
@@ -239,9 +249,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -272,16 +284,126 @@ done:
return rval;
}
+void
+qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ /* Don't re-login in target mode */
+ if (!fcport->tgt_session)
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qlt_logo_completion_handler(fcport, data[0]);
+}
+
+static void
+qla2x00_async_prlo_sp_done(void *s, int res)
+{
+ srb_t *sp = (srb_t *)s;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->vha;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp);
+}
+
+int
+qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_PRLO_CMD;
+ sp->name = "prlo";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_prlo_sp_done;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
+static
+void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (ea->data[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "%s %8phC: adisc fail: post delete\n",
+ __func__, ea->fcport->port_name);
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ return;
+ }
+
+ if (ea->fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != ea->fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, ea->fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ return;
+ }
+
+ __qla24xx_handle_gpdb_event(vha, ea);
+}
+
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s res %x %8phC\n",
+ sp->name, res, sp->fcport->port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_ADISC_DONE;
+ ea.rc = res;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.fcport = sp->fcport;
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
sp->free(sp);
}
@@ -313,15 +435,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
return rval;
done_free_sp:
sp->free(sp);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
return rval;
}
@@ -333,9 +455,19 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
u16 i, n, found = 0, loop_id;
port_id_t id;
u64 wwn;
- u8 opt = 0, current_login_state;
+ u16 data[2];
+ u8 current_login_state;
fcport = ea->fcport;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc,
+ fcport->login_gen, fcport->last_login_gen,
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
@@ -356,9 +488,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
- "%s %8phC login gen changed login %d|%d\n",
- __func__, fcport->port_name,
- fcport->last_login_gen, fcport->login_gen);
+ "%s %8phC login gen changed\n",
+ __func__, fcport->port_name);
return;
}
@@ -400,7 +531,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e3,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport, 1);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -430,8 +561,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e4,
"%s %d %8phC post gpdb\n",
__func__, __LINE__, fcport->port_name);
- opt = PDO_FORCE_ADISC;
- qla24xx_post_gpdb_work(vha, fcport, opt);
+
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
case DSC_LS_PORT_UNAVAIL:
default:
@@ -449,36 +586,29 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (!found) {
/* fw has no record of this port */
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- } else {
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
-
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
- qlt_schedule_sess_for_deletion
- (conflict_fcport, 1);
- }
-
- if (fcport->loop_id == loop_id) {
- /* FW already picked this loop id for another fcport */
- qla2x00_find_new_loop_id(vha, fcport);
- }
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
}
+
+ /* FW already picked this loop id for another fcport */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -496,6 +626,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
struct get_name_list_extended *e;
u64 wwn;
struct list_head h;
+ bool found = false;
ql_dbg(ql_dbg_disc, vha, 0x20e7,
"Async done-%s res %x mb[1]=%x mb[2]=%x \n",
@@ -539,12 +670,44 @@ qla24xx_async_gnl_sp_done(void *s, int res)
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
+ /* Create new fcports for any sessions the firmware knows of. */
+ for (i = 0; i < n; i++) {
+ port_id_t id;
+ u64 wwnn;
+
+ e = &vha->gnl.l[i];
+ wwn = wwn_to_u64(e->port_name);
+
+ found = false;
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!memcmp((u8 *)&wwn, fcport->port_name,
+ WWN_SIZE)) {
+ found = true;
+ break;
+ }
+ }
+
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
+
+ if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC %06x post new sess\n",
+ __func__, __LINE__, (u8 *)&wwn, id.b24);
+ wwnn = wwn_to_u64(e->node_name);
+ qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
+ (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ }
+ }
+
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -558,14 +721,13 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
unsigned long flags;
u16 *mb;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
@@ -573,8 +735,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
if (vha->gnl.sent) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
+ return QLA_SUCCESS;
}
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -582,6 +743,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -616,8 +779,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -630,6 +793,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -639,31 +803,18 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
- int rval = QLA_SUCCESS;
struct event_arg ea;
ql_dbg(ql_dbg_disc, vha, 0x20db,
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- if (res) {
- rval = res;
- goto gpd_error_out;
- }
-
- pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
-
- rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-gpd_error_out:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
- ea.rc = rval;
ea.fcport = fcport;
ea.sp = sp;
@@ -754,7 +905,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -783,6 +933,7 @@ int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
e->u.fcport.fcport = fcport;
e->u.fcport.opt = opt;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -796,16 +947,16 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GPDB;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
@@ -851,47 +1002,17 @@ done_free_sp:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
qla24xx_post_gpdb_work(vha, fcport, opt);
return rval;
}
static
-void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- int rval = ea->rc;
- fc_port_t *fcport = ea->fcport;
unsigned long flags;
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
- fcport->disc_state, fcport->fw_login_state, rval);
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* target side must have changed it. */
- ql_dbg(ql_dbg_disc, vha, 0x20d3,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen,
- fcport->login_gen);
- return;
- } else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- return;
- }
-
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- return;
- }
-
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
ea->fcport->deleted = 0;
@@ -905,47 +1026,157 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
!vha->hw->flags.gpsc_supported) {
ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
+ __func__, __LINE__, ea->fcport->port_name,
vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20d7,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
- vha->fcport_count);
-
- qla24xx_post_gpsc_work(vha, fcport);
+ if (ea->fcport->id_changed) {
+ ea->fcport->id_changed = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gfpnid fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gfpnid_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, ea->fcport);
+ }
}
+ } else if (ea->fcport->login_succ) {
+ /*
+ * We have an existing session. A late RSCN delivery
+ * must have triggered the session to be re-validate.
+ * Session is still valid.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "%s %d %8phC session revalidate success\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+ struct port_database_24xx *pd;
+ struct srb *sp = ea->sp;
+
+ pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, ea->rc);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ switch (pd->current_login_state) {
+ case PDS_PRLI_COMPLETE:
+ __qla24xx_parse_gpdb(vha, fcport, pd);
+ break;
+ case PDS_PLOGI_PENDING:
+ case PDS_PLOGI_COMPLETE:
+ case PDS_PRLI_PENDING:
+ case PDS_PRLI2_PENDING:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
+ __func__, __LINE__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ case PDS_LOGO_PENDING:
+ case PDS_PORT_UNAVAILABLE:
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ __qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
-int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
- if (fcport->login_retry == 0)
- return 0;
+ u8 login = 0;
+ int rc;
- if (fcport->scan_state != QLA_FCPORT_FOUND)
- return 0;
+ if (qla_tgt_mode_enabled(vha))
+ return;
+
+ if (qla_dual_mode_enabled(vha)) {
+ if (N2N_TOPO(vha->hw)) {
+ u64 mywwn, wwn;
+
+ mywwn = wwn_to_u64(vha->port_name);
+ wwn = wwn_to_u64(fcport->port_name);
+ if (mywwn > wwn)
+ login = 1;
+ else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ && time_after_eq(jiffies,
+ fcport->plogi_nack_done_deadline))
+ login = 1;
+ } else {
+ login = 1;
+ }
+ } else {
+ /* initiator mode */
+ login = 1;
+ }
+
+ if (login) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ rc = qla2x00_find_new_loop_id(vha, fcport);
+ if (rc) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess - out of loopid\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->scan_state = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ }
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+}
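In an N2N (point-to-point) topology only one side should originate PLOGI, and qla_chk_n2n_b4_login() breaks the tie by letting the port with the numerically larger WWN log in (the lower side still logs in once the peer's PLOGI grace period has expired). The core rule in isolation:

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if the local port should originate PLOGI in N2N topology. */
static int n2n_should_login(uint64_t my_wwn, uint64_t peer_wwn)
{
	return my_wwn > peer_wwn;	/* higher WWN wins the tie-break */
}

int main(void)
{
	uint64_t mine = 0x2100002455667788ULL;
	uint64_t peer = 0x2100002455667700ULL;

	printf("login=%d\n", n2n_should_login(mine, peer));	/* login=1 */
	return 0;
}

Because both ports run the same deterministic comparison on the same pair of WWNs, exactly one side decides to originate the login.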
+
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ u16 data[2];
+ u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
- fcport->loop_id);
+ fcport->login_gen, fcport->login_retry,
+ fcport->loop_id, fcport->scan_state);
- fcport->login_retry--;
+ if (fcport->login_retry == 0)
+ return 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ return 0;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
+ }
}
/* for pure Target Mode. Login will not be initiated */
@@ -957,19 +1188,23 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
+ fcport->login_retry--;
+
switch (fcport->disc_state) {
case DSC_DELETED:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ wwn = wwn_to_u64(fcport->node_name);
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
ql_dbg(ql_dbg_disc, vha, 0x20bd,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_post_gnl_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20bf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla_chk_n2n_b4_login(vha, fcport);
}
break;
@@ -981,40 +1216,26 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
}
- if (fcport->flags & FCF_FCP2_DEVICE) {
- u8 opt = PDO_FORCE_ADISC;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c9,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
-
- fcport->disc_state = DSC_GPDB;
- qla24xx_post_gpdb_work(vha, fcport, opt);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
- }
-
+ qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gidpn_work(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ qla_chk_n2n_b4_login(vha, fcport);
+ else
+ qla24xx_post_gidpn_work(vha, fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post gpdb\n",
+ "%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
default:
@@ -1040,16 +1261,15 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
switch (fcport->disc_state) {
case DSC_DELETED:
case DSC_LOGIN_COMPLETE:
- qla24xx_post_gidpn_work(fcport->vha, fcport);
+ qla24xx_post_gpnid_work(fcport->vha, &ea->id);
break;
-
default:
break;
}
}
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
- u8 *port_name, void *pla)
+ u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
struct qla_work_evt *e;
e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
@@ -1058,47 +1278,20 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
e->u.new_sess.id = *id;
e->u.new_sess.pla = pla;
+ e->u.new_sess.fc4_type = fc4_type;
memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+ if (node_name)
+ memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
return qla2x00_post_work(vha, e);
}
static
-int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
- struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- if (test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- switch (vha->host->active_mode) {
- case MODE_INITIATOR:
- case MODE_DUAL:
- if (fcport->scan_state == QLA_FCPORT_FOUND)
- qla24xx_fcport_handle_login(vha, fcport);
- break;
-
- case MODE_TARGET:
- default:
- /* no-op */
- break;
- }
-
- return 0;
-}
-
-static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
- if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->login_retry++;
- return;
- }
-
ql_dbg(ql_dbg_disc, vha, 0x2102,
"%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
__func__, fcport->port_name, fcport->disc_state,
@@ -1113,8 +1306,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
return;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
+ }
}
if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1132,7 +1327,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gidpn(vha, fcport);
+ qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1141,16 +1336,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport, *f, *tf;
+ fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- int rc;
+ unsigned long flags;
switch (ea->event) {
- case FCME_RELOGIN:
case FCME_RSCN:
case FCME_GIDPN_DONE:
case FCME_GPSC_DONE:
case FCME_GPNID_DONE:
+ case FCME_GNNID_DONE:
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
return;
@@ -1171,20 +1366,15 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (!fcport) {
- /* cable moved */
- rc = qla24xx_post_gpnid_work(vha, &ea->id);
- if (rc) {
- ql_log(ql_log_warn, vha, 0xd044,
- "RSCN GPNID work failed %02x%02x%02x\n",
- ea->id.b.domain, ea->id.b.area,
- ea->id.b.al_pa);
- }
- } else {
- ea->fcport = fcport;
- qla24xx_handle_rscn_event(fcport, ea);
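+			/* Coalesce RSCN bursts: queue one delayed fabric-scan
+			 * work item rather than a GPN_ID per affected port.
+			 */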
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
}
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1227,7 +1417,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
qla24xx_handle_gnl_done_event(vha, ea);
break;
case FCME_GPSC_DONE:
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ qla24xx_handle_gpsc_event(vha, ea);
break;
case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
qla24xx_handle_plogi_done_event(vha, ea);
@@ -1244,8 +1434,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFFID_DONE:
qla24xx_handle_gffid_event(vha, ea);
break;
- case FCME_DELETE_DONE:
- qla24xx_handle_delete_done_event(vha, ea);
+ case FCME_ADISC_DONE:
+ qla24xx_handle_adisc_event(vha, ea);
+ break;
+ case FCME_GNNID_DONE:
+ qla24xx_handle_gnnid_event(vha, ea);
+ break;
+ case FCME_GFPNID_DONE:
+ qla24xx_handle_gfpnid_event(vha, ea);
break;
default:
BUG_ON(1);
@@ -1327,6 +1523,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
done_free_sp:
sp->free(sp);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -1368,6 +1565,13 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
sp->name = "abort";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+
+ if (vha->flags.qpairs_available && cmd_sp->qpair)
+ abt_iocb->u.abt.req_que_no =
+ cpu_to_le16(cmd_sp->qpair->req->id);
+ else
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+
sp->done = qla24xx_abort_sp_done;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1402,6 +1606,9 @@ qla24xx_async_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -1452,6 +1659,42 @@ static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
+ u16 lid;
+ struct fc_port *conflict_fcport;
+ unsigned long flags;
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
+	    ea->sp->gen1, fcport->rscn_gen,
+ ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC Remote is trying to login\n",
+ __func__, __LINE__, fcport->port_name);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
@@ -1467,11 +1710,19 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_prli_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name);
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id, ea->fcport->d_id.b24);
+
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
+ ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
}
break;
@@ -1513,8 +1764,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
ea->fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
+ lid = ea->iop[1] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(ea->fcport->port_name),
+ ea->fcport->d_id, lid, &conflict_fcport);
+
+ if (conflict_fcport) {
+ /*
+		 * Another fcport shares the same loop_id/nport id. The
+		 * conflicting fcport needs to finish cleanup before this
+		 * fcport can proceed with login.
+ */
+ conflict_fcport->conflict = ea->fcport;
+ ea->fcport->login_pause = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(ea->fcport);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+
+ qla2x00_clear_loop_id(ea->fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ ea->fcport->loop_id = lid;
+ ea->fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
break;
}
return;
@@ -2540,70 +2821,27 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
return rval;
}
-void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
int rval;
- uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
- eft_size, fce_size, mq_size;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
- if (ha->fw_dump) {
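+	/* ha->eft doubles as the "offload memory already allocated" marker. */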
+ if (ha->eft) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
- "Firmware dump already allocated.\n");
+ "%s: Offload Mem is already allocated.\n",
+ __func__);
return;
}
- ha->fw_dumped = 0;
- ha->fw_dump_cap_flags = 0;
- dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
- req_q_size = rsp_q_size = 0;
-
- if (IS_QLA27XX(ha))
- goto try_fce;
-
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- fixed_size = sizeof(struct qla2100_fw_dump);
- } else if (IS_QLA23XX(ha)) {
- fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
- mem_size = (ha->fw_memory_size - 0x11000 + 1) *
- sizeof(uint16_t);
- } else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
- fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
- else if (IS_QLA81XX(ha))
- fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
- else if (IS_QLA25XX(ha))
- fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
- else
- fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
-
- mem_size = (ha->fw_memory_size - 0x100000 + 1) *
- sizeof(uint32_t);
- if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
- mq_size = sizeof(struct qla2xxx_mq_chain);
- /*
- * Allocate maximum buffer size for all queues.
- * Resizing must be done at end-of-dump processing.
- */
- mq_size += ha->max_req_queues *
- (req->length * sizeof(request_t));
- mq_size += ha->max_rsp_queues *
- (rsp->length * sizeof(response_t));
- }
- if (ha->tgt.atio_ring)
- mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ if (IS_FWI2_CAPABLE(ha)) {
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha))
goto try_eft;
-try_fce:
if (ha->fce)
dma_free_coherent(&ha->pdev->dev,
FCE_SIZE, ha->fce, ha->fce_dma);
@@ -2631,7 +2869,6 @@ try_fce:
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
- fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
@@ -2648,7 +2885,7 @@ try_eft:
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
EFT_SIZE / 1024);
- goto cont_alloc;
+ goto eft_err;
}
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
@@ -2657,17 +2894,76 @@ try_eft:
"Unable to initialize EFT (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
tc_dma);
- goto cont_alloc;
+ goto eft_err;
}
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
- eft_size = EFT_SIZE;
ha->eft_dma = tc_dma;
ha->eft = tc;
}
-cont_alloc:
+eft_err:
+ return;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+ uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+ eft_size, fce_size, mq_size;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct qla2xxx_fw_dump *fw_dump;
+
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ fixed_size = sizeof(struct qla2100_fw_dump);
+ } else if (IS_QLA23XX(ha)) {
+ fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+ mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+ sizeof(uint16_t);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
+ fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+ else if (IS_QLA25XX(ha))
+ fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+ else
+ fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+ mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+ sizeof(uint32_t);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
+ if (ha->tgt.atio_ring)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ goto try_eft;
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+try_eft:
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+ eft_size = EFT_SIZE;
+ }
+
if (IS_QLA27XX(ha)) {
if (!ha->fw_dump_template) {
ql_log(ql_log_warn, vha, 0x00ba,
@@ -2695,51 +2991,44 @@ cont_alloc:
ha->exlogin_size;
allocate:
- ha->fw_dump = vmalloc(dump_size);
- if (!ha->fw_dump) {
- ql_log(ql_log_warn, vha, 0x00c4,
- "Unable to allocate (%d KB) for firmware dump.\n",
- dump_size / 1024);
-
- if (ha->fce) {
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
- ha->fce = NULL;
- ha->fce_dma = 0;
- }
-
- if (ha->eft) {
- dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
- ha->eft_dma);
- ha->eft = NULL;
- ha->eft_dma = 0;
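+	/* Reallocate only when the computed dump size has changed;
+	 * otherwise keep reusing the existing buffer.
+	 */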
+ if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
+ fw_dump = vmalloc(dump_size);
+ if (!fw_dump) {
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+ } else {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ ha->fw_dump = fw_dump;
+
+ ha->fw_dump_len = dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+ if (IS_QLA27XX(ha))
+ return;
+
+ ha->fw_dump->signature[0] = 'Q';
+ ha->fw_dump->signature[1] = 'L';
+ ha->fw_dump->signature[2] = 'G';
+ ha->fw_dump->signature[3] = 'C';
+ ha->fw_dump->version = htonl(1);
+
+ ha->fw_dump->fixed_size = htonl(fixed_size);
+ ha->fw_dump->mem_size = htonl(mem_size);
+ ha->fw_dump->req_q_size = htonl(req_q_size);
+ ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+ ha->fw_dump->eft_size = htonl(eft_size);
+ ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+ ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+ ha->fw_dump->header_size =
+ htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
- return;
}
- ha->fw_dump_len = dump_size;
- ql_dbg(ql_dbg_init, vha, 0x00c5,
- "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
-
- if (IS_QLA27XX(ha))
- return;
-
- ha->fw_dump->signature[0] = 'Q';
- ha->fw_dump->signature[1] = 'L';
- ha->fw_dump->signature[2] = 'G';
- ha->fw_dump->signature[3] = 'C';
- ha->fw_dump->version = htonl(1);
-
- ha->fw_dump->fixed_size = htonl(fixed_size);
- ha->fw_dump->mem_size = htonl(mem_size);
- ha->fw_dump->req_q_size = htonl(req_q_size);
- ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
-
- ha->fw_dump->eft_size = htonl(eft_size);
- ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
- ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
-
- ha->fw_dump->header_size =
- htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
static int
@@ -3065,9 +3354,12 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && ql2xallocfwdump
- && !(IS_P3P_TYPE(ha)))
+ if (!fw_major_version && !(IS_P3P_TYPE(ha)))
+ qla2x00_alloc_offload_mem(vha);
+
+ if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+
} else {
goto failed;
}
@@ -3278,6 +3570,12 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_4;
else
ha->fw_options[2] &= ~BIT_4;
+
+	/* Reserve 1/2 of emergency exchanges for ELS. */
+ if (qla2xuseresexchforels)
+ ha->fw_options[2] |= BIT_8;
+ else
+ ha->fw_options[2] &= ~BIT_8;
}
ql_dbg(ql_dbg_init, vha, 0x00e8,
@@ -3671,6 +3969,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
port_id_t id;
+ unsigned long flags;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3752,7 +4051,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.area = area;
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_host_map(vha, id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4293,6 +4594,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
+ if (ha->flags.rida_fmt2) {
+ /* With Rida Format 2, the login is already triggered.
+ * We know who is on the other side of the wire.
+			 * No need to do a login to find out, or to drop into
+ * qla2x00_configure_local_loop().
+ */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ }
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -4521,6 +4837,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
@@ -4531,22 +4851,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /*
- * Mark local devices that were present with FCF_DEVICE_LOST for now.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&fcport->state) == FCS_ONLINE &&
- fcport->port_type != FCT_BROADCAST &&
- (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
-
- ql_dbg(ql_dbg_disc, vha, 0x2096,
- "Marking port lost loop_id=0x%04x.\n",
- fcport->loop_id);
-
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
- }
- }
-
	/* Initiate N2N login. */
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
rval = qla24xx_n2n_handle_login(vha, new_fcport);
@@ -4589,6 +4893,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+ new_fcport->scan_state = QLA_FCPORT_FOUND;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
@@ -4620,13 +4925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->d_id.b24 = new_fcport->d_id.b24;
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
-
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
-
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
break;
}
@@ -4637,11 +4936,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate a new replacement fcport. */
fcport = new_fcport;
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -4662,11 +4956,38 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
- qla2x00_update_fcport(vha, fcport);
-
found_devs++;
}
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ }
+
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
cleanup_allocation:
kfree(new_fcport);
@@ -4920,9 +5241,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
/* Mark the time right before querying FW for connected ports.
* This process is long, asynchronous and by the time it's done,
@@ -4932,7 +5250,17 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* will be newer than discovery_gen. */
qlt_do_generation_tick(vha, &discovery_gen);
- rval = qla2x00_find_all_fabric_devs(vha);
+ if (USE_ASYNC_SCAN(ha)) {
+		rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
+ if (rval)
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ rval = qla2x00_find_all_fabric_devs(vha);
+ }
if (rval != QLA_SUCCESS)
break;
} while (0);
@@ -5237,9 +5565,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
"%s %d %8phC post del sess\n",
__func__, __LINE__,
fcport->port_name);
-
- qlt_schedule_sess_for_deletion_lock
- (fcport);
+ qlt_schedule_sess_for_deletion(fcport);
continue;
}
}
@@ -5974,6 +6300,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ SAVE_TOPO(ha);
+ ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -8173,9 +8501,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
int ret = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = qpair->hw;
- if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
- goto fail;
-
qpair->delete_in_progress = 1;
while (atomic_read(&qpair->ref_count))
msleep(500);
@@ -8183,6 +8508,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
goto fail;
+
ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
if (ret != QLA_SUCCESS)
goto fail;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 17d2c20f1f75..4d32426393c7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -273,6 +273,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
+ init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d810a447cb4a..1b62e943ec49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2158,7 +2158,9 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
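+		/* With shadow registers the firmware mirrors the queue
+		 * out-pointer into host memory, so read it there rather
+		 * than from the MMIO register.
+		 */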
+ if (qpair->use_shadow_reg)
+ cnt = *req->out_ptr;
+ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2392,26 +2394,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- /* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_dbg(ql_dbg_io, vha, 0x3070,
- "mbx abort_command failed.\n");
- } else {
- ql_dbg(ql_dbg_io, vha, 0x3071,
- "mbx abort_command success.\n");
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
complete(&lio->u.els_logo.comp);
}
@@ -2631,7 +2620,7 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
struct scsi_qla_host *vha = sp->vha;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8pC\n",
+ "%s ELS hdl=%x, portid=%06x done %8phC\n",
sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
complete(&lio->u.els_plogi.comp);
@@ -3286,7 +3275,9 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
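+	/* The abort handle now carries the originating request-queue
+	 * number in its upper 16 bits (packed by MAKE_HANDLE).
+	 */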
+ abt_iocb->handle =
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
@@ -3294,7 +3285,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(req->id);
+ abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
wmb();
}
@@ -3381,6 +3372,40 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
return rval;
}
+static void
+qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
+{
+ int map, pos;
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->handle = sp->handle;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
+ vce->vp_count = cpu_to_le16(1);
+
+ /*
+	 * The index map in firmware starts at 1, so decrement the index;
+	 * this is safe because index 0 is never used.
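+	 * (e.g. a hypothetical vp_index of 9 lands in byte 1, bit 0)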
+ */
+ map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
+ pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
+ vce->vp_idx_map[map] |= 1 << pos;
+}
+
+static void
+qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags =
+ cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
+
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3459,6 +3484,12 @@ qla2x00_start_sp(srb_t *sp)
case SRB_NACK_LOGO:
qla2x00_send_notify_ack_iocb(sp, pkt);
break;
+ case SRB_CTRL_VP:
+ qla25xx_ctrlvp_iocb(sp, pkt);
+ break;
+ case SRB_PRLO_CMD:
+ qla24xx_prlo_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2fd79129bb2a..14109d86c3f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -809,6 +809,7 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ SAVE_TOPO(ha);
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -922,7 +923,6 @@ skip_rio:
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
- ha->flags.gpsc_supported = 1;
vha->flags.management_server_logged_in = 0;
break;
@@ -1009,7 +1009,7 @@ skip_rio:
if (qla_ini_mode_enabled(vha)) {
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
fcport->logout_on_delete = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -1059,8 +1059,7 @@ global_port_update:
* Mark all devices as missing so we will login again.
*/
atomic_set(&vha->loop_state, LOOP_UP);
-
- qla2x00_mark_all_devices_lost(vha, 1);
+ vha->scan.scan_retry = 0;
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1202,6 +1201,7 @@ global_port_update:
qla2xxx_wake_dpc(vha);
}
}
+ /* fall through */
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
@@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
		/* Borrowing sts_entry_24xx.comp_status;
		 * same location as ct_entry_24xx.comp_status.
		 */
- res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
sp->name);
sp->done(sp, res);
@@ -1769,7 +1769,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* drop through */
+ /* fall through */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1936,6 +1936,37 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
sp->done(sp, ret);
}
+static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
+ struct vp_ctrl_entry_24xx *vce)
+{
+ const char func[] = "CTRLVP-IOCB";
+ srb_t *sp;
+ int rval = QLA_SUCCESS;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
+ if (!sp)
+ return;
+
+ if (vce->entry_status != 0) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c4,
+ "%s: Failed to complete IOCB -- error status (%x)\n",
+ sp->name, vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c5,
+ "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
+ sp->name, le16_to_cpu(vce->comp_status),
+ le16_to_cpu(vce->vp_idx_failed));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_vport, vha, 0x10c6,
+ "Done %s.\n", __func__);
+ }
+
+ sp->rc = rval;
+ sp->done(sp, rval);
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2369,7 +2400,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int res = 0;
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
- uint8_t no_logout = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2640,7 +2670,6 @@ check_scsi_status:
break;
case CS_PORT_LOGGED_OUT:
- no_logout = 1;
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
@@ -2671,11 +2700,8 @@ check_scsi_status:
port_state_str[atomic_read(&fcport->state)],
comp_status);
- if (no_logout)
- fcport->logout_on_delete = 0;
-
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -2972,9 +2998,9 @@ process_err:
(response_t *)pkt);
break;
} else {
- /* drop through */
qlt_24xx_process_atio_queue(vha, 1);
}
+ /* fall through */
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3005,6 +3031,10 @@ process_err:
qla24xx_mbx_iocb_entry(vha, rsp->req,
(struct mbx_24xx_entry *)pkt);
break;
+ case VP_CTRL_IOCB_TYPE:
+ qla_ctrlvp_completed(vha, rsp->req,
+ (struct vp_ctrl_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb717d47339f..7397aeddd96c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -17,6 +17,7 @@ static struct mb_cmd_name {
{MBC_GET_PORT_DATABASE, "GPDB"},
{MBC_GET_ID_LIST, "GIDList"},
{MBC_GET_LINK_PRIV_STATS, "Stats"},
+ {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};
static const char *mb_to_str(uint16_t cmd)
@@ -3731,6 +3732,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
unsigned long flags;
int found;
port_id_t id;
+ struct fc_port *fcport;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3753,7 +3755,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
-
+ ha->current_topology = ISP_CFG_NL;
qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
@@ -3797,6 +3799,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ ha->flags.gpsc_supported = 1;
+ ha->current_topology = ISP_CFG_F;
/* buffer to buffer credit flag */
vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3862,6 +3866,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name);
/* N2N. direct connect */
+ ha->current_topology = ISP_CFG_N;
+ ha->flags.rida_fmt2 = 1;
vha->d_id.b.domain = rptid_entry->port_id[2];
vha->d_id.b.area = rptid_entry->port_id[1];
vha->d_id.b.al_pa = rptid_entry->port_id[0];
@@ -3869,6 +3875,40 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f2.port_name, 1);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ } else {
+ id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
+ id.b.area = rptid_entry->u.f2.remote_nport_id[1];
+ id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f2.port_name,
+ rptid_entry->u.f2.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3945,83 +3985,6 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
}
/*
- * qla24xx_control_vp
- * Enable a virtual port for given host
- *
- * Input:
- * ha = adapter block pointer.
- * vhba = virtual adapter (unused)
- * index = index number for enabled VP
- *
- * Returns:
- * qla2xxx local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
-{
- int rval;
- int map, pos;
- struct vp_ctrl_entry_24xx *vce;
- dma_addr_t vce_dma;
- struct qla_hw_data *ha = vha->hw;
- int vp_index = vha->vp_idx;
- struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
- "Entered %s enabling index %d.\n", __func__, vp_index);
-
- if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
- return QLA_PARAMETER_ERROR;
-
- vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
- if (!vce) {
- ql_log(ql_log_warn, vha, 0x10c2,
- "Failed to allocate VP control IOCB.\n");
- return QLA_MEMORY_ALLOC_FAILED;
- }
-
- vce->entry_type = VP_CTRL_IOCB_TYPE;
- vce->entry_count = 1;
- vce->command = cpu_to_le16(cmd);
- vce->vp_count = cpu_to_le16(1);
-
- /* index map in firmware starts with 1; decrement index
- * this is ok as we never use index 0
- */
- map = (vp_index - 1) / 8;
- pos = (vp_index - 1) & 7;
- mutex_lock(&ha->vport_lock);
- vce->vp_idx_map[map] |= 1 << pos;
- mutex_unlock(&ha->vport_lock);
-
- rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c3,
- "Failed to issue VP control IOCB (%x).\n", rval);
- } else if (vce->entry_status != 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c4,
- "Failed to complete IOCB -- error status (%x).\n",
- vce->entry_status);
- rval = QLA_FUNCTION_FAILED;
- } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c5,
- "Failed to complete IOCB -- completion status (%x).\n",
- le16_to_cpu(vce->comp_status));
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
- "Done %s.\n", __func__);
- }
-
- dma_pool_free(ha->s_dma_pool, vce, vce_dma);
-
- return rval;
-}
-
-/*
* qla2x00_send_change_request
* Receive or disable RSCN request from fabric controller
*
@@ -6160,8 +6123,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
}
/* Check for logged in state. */
- if (current_login_state != PDS_PRLI_COMPLETE &&
- last_login_state != PDS_PRLI_COMPLETE) {
+ if (current_login_state != PDS_PRLI_COMPLETE) {
ql_dbg(ql_dbg_mbx, vha, 0x119a,
"Unable to verify login-state (%x/%x) for loop_id %x.\n",
current_login_state, last_login_state, fcport->loop_id);
@@ -6350,3 +6312,32 @@ qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
return rval;
}
+
+int qla24xx_res_count_wait(struct scsi_qla_host *vha,
+ uint16_t *out_mb, int out_mb_sz)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
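+		/* Copy back no more than the IOCB mailbox register area. */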
+ if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
+ memcpy(out_mb, mc.mb, out_mb_sz);
+ else
+ memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index bd9f14bf7ac2..e965b16f21e3 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -50,10 +50,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_vp_map(vha, SET_VP_IDX);
-
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&ha->vport_lock);
return vp_id;
@@ -158,9 +159,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
- spin_lock_irqsave(&vha->hw->vport_slock, flags);
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
- spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
@@ -264,13 +265,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_LIP_RESET:
case MBA_POINT_TO_POINT:
case MBA_CHG_IN_CONNECTION:
- case MBA_PORT_UPDATE:
- case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ if ((mb[3] & 0xff) == vha->vp_idx) {
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p\n",
+ i, *mb, vha);
+ qla2x00_async_event(vha, rsp, mb);
+ }
+ break;
}
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -319,8 +327,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
- qla2x00_do_work(vha);
-
/* Check if Fw is ready to configure VP first */
if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -343,15 +349,19 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
"FCPort update end.\n");
}
- if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
- !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
- atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+ atomic_read(&vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, vha, 0x4018,
- "Relogin needed scheduled.\n");
- qla2x00_relogin(vha);
- ql_dbg(ql_dbg_dpc, vha, 0x4019,
- "Relogin needed end.\n");
+ if (!vha->relogin_jif ||
+ time_after_eq(jiffies, vha->relogin_jif)) {
+ vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
+ qla24xx_post_relogin_work(vha);
+ }
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -475,7 +485,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+ vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
vha->dpc_flags = 0L;
@@ -569,14 +579,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (req) {
+ if (req && vha->flags.qpairs_req_created) {
req->options |= BIT_0;
ret = qla25xx_init_req_que(vha, req);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_req_que(vha, req);
+ }
return ret;
}
@@ -584,14 +596,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (rsp) {
+ if (rsp && vha->flags.qpairs_rsp_created) {
rsp->options |= BIT_0;
ret = qla25xx_init_rsp_que(vha, rsp);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_rsp_que(vha, rsp);
+ }
return ret;
}
@@ -884,3 +898,79 @@ que_failed:
failed:
return 0;
}
+
+static void qla_ctrlvp_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ complete(&sp->comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/**
+ * qla24xx_control_vp() - Enable a virtual port for given host
+ * @vha: adapter block pointer
+ * @cmd: command type to be sent for enable virtual port
+ *
+ * Return: qla2xxx local function return status code.
+ */
+int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ int vp_index = vha->vp_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+
+ ql_dbg(ql_dbg_vport, vha, 0x10c1,
+ "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
+
+ if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+ return QLA_PARAMETER_ERROR;
+
+ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CTRL_VP;
+ sp->name = "ctrl_vp";
+ sp->done = qla_ctrlvp_sp_done;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
+ sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
+ wait_for_completion(&sp->comp);
+ rval = sp->rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ goto done_free_sp;
+ default:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+done:
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 0aa9c38bf347..525ac35a757b 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -11,8 +11,6 @@
#include "qla_def.h"
#include "qla_gbl.h"
-#include <linux/delay.h>
-
#define TIMEOUT_100_MS 100
static const uint32_t qla8044_reg_tbl[] = {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 46f2d0cf7c0d..12ee6e02d146 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -277,6 +277,12 @@ MODULE_PARM_DESC(ql2xenablemsix,
" 1 -- enable MSI-X interrupt mechanism.\n"
" 2 -- enable MSI interrupt mechanism.\n");
+int qla2xuseresexchforels;
+module_param(qla2xuseresexchforels, int, 0444);
+MODULE_PARM_DESC(qla2xuseresexchforels,
+ "Reserve 1/2 of emergency exchanges for ELS.\n"
+ " 0 (default): disabled");
+
/*
* SCSI host template entry points
*/
@@ -294,7 +300,6 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
@@ -1705,93 +1710,103 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
-void
-qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+static void
+__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int que, cnt, status;
+ int cnt, status;
unsigned long flags;
srb_t *sp;
+ scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
uint8_t trace = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
- if (!req->outstanding_cmds)
- continue;
- for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (sp) {
- req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
- if (sp->type == SRB_NVME_CMD ||
- sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- qla_nvme_abort(ha, sp);
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- } else if (GET_CMD_SP(sp) &&
- !ha->flags.eeh_busy &&
- (!test_bit(ABORT_ISP_ACTIVE,
- &vha->dpc_flags)) &&
- (sp->type == SRB_SCSI_CMD)) {
- /*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
- * accessible/responding.
- *
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
- */
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
- }
- sp->done(sp, res);
- } else {
- if (!vha->hw->tgt.tgt_ops || !tgt ||
- qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
- continue;
- }
- cmd = (struct qla_tgt_cmd *)sp;
- qlt_abort_cmd_on_host_reset(cmd->vha,
- cmd);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ req = qp->req;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ req->outstanding_cmds[cnt] = NULL;
+ if (sp->cmd_type == TYPE_SRB) {
+ if (sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_NVME_LS) {
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ } else if (GET_CMD_SP(sp) &&
+ !ha->flags.eeh_busy &&
+ (!test_bit(ABORT_ISP_ACTIVE,
+ &vha->dpc_flags)) &&
+ (sp->type == SRB_SCSI_CMD)) {
+ /*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
+ * sp->done() call and not the
+ * call in qla2xxx_eh_abort()
+ * ends the SCSI command (with
+ * result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ /*
+ * Get rid of extra reference
+ * if immediate exit from
+ * ql2xxx_eh_abort
+ */
+ if (status == FAILED &&
+ (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(
+ &sp->ref_count);
}
+ sp->done(sp, res);
+ } else {
+ if (!vha->hw->tgt.tgt_ops || !tgt ||
+ qla_ini_mode_enabled(vha)) {
+ if (!trace)
+ ql_dbg(ql_dbg_tgt_mgt,
+ vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
+ continue;
+ }
+ cmd = (struct qla_tgt_cmd *)sp;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
}
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+}
+
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+{
+ int que;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla2x00_abort_all_cmds(ha->base_qpair, res);
+
+ for (que = 0; que < ha->max_qpairs; que++) {
+ if (!ha->queue_pair_map[que])
+ continue;
+
+ __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
+ }
}
static int
@@ -2689,14 +2704,22 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
struct scsi_qla_host, iocb_work);
- int cnt = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int i = 20;
+ unsigned long flags;
- while (!list_empty(&vha->work_list)) {
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
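+	/* Process at most 20 queued events per pass; clearing
+	 * IOCB_WORK_ACTIVE below lets the next qla2x00_post_work()
+	 * re-queue this worker.
+	 */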
+ while (!list_empty(&vha->work_list) && i > 0) {
qla2x00_do_work(vha);
- cnt++;
- if (cnt > 10)
- break;
+ i--;
}
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
}
/*
@@ -2790,6 +2813,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->max_exchg = FW_MAX_EXCHANGES_CNT;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3011,9 +3035,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha = qla2x00_create_host(sht, ha);
if (!base_vha) {
ret = -ENOMEM;
- qla2x00_mem_free(ha);
- qla2x00_free_req_que(ha, req);
- qla2x00_free_rsp_que(ha, rsp);
goto probe_hw_failed;
}
@@ -3023,7 +3044,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+ base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3074,7 +3095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
- goto probe_init_failed;
+ goto probe_hw_failed;
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
@@ -3193,10 +3214,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
if (ha->mqenable) {
bool mq = false;
bool startit = false;
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
if (QLA_TGT_MODE_ENABLED()) {
mq = true;
@@ -3390,6 +3412,9 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
+ qla2x00_mem_free(ha);
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
qla2x00_clear_drv_active(ha);
iospace_config_failed:
@@ -3448,8 +3473,13 @@ qla2x00_shutdown(struct pci_dev *pdev)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ }
/* Turn adapter off line */
vha->flags.online = 0;
@@ -3609,6 +3639,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ vfree(base_vha->scan.l);
+
if (IS_QLAFX00(ha))
qlafx00_driver_shutdown(base_vha, 20);
@@ -3628,10 +3660,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla84xx_put_chip(base_vha);
- /* Laser should be disabled only for ISP2031 */
- if (IS_QLA2031(ha))
- qla83xx_disable_laser(base_vha);
-
/* Disable timer */
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
@@ -3692,8 +3720,16 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ if (ha->flags.fw_started) {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ ha->flags.fw_started = 0;
+ }
+ }
vha->flags.online = 0;
@@ -3833,7 +3869,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
@@ -4221,6 +4257,9 @@ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
u32 temp;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
+ if (max_cnt > vha->hw->max_exchg)
+ max_cnt = vha->hw->max_exchg;
+
if (qla_ini_mode_enabled(vha)) {
if (ql2xiniexchg > max_cnt)
ql2xiniexchg = max_cnt;
@@ -4250,8 +4289,8 @@ int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
- u16 size, max_cnt;
- u32 temp;
+ u16 size, max_cnt;
+ u32 actual_cnt, totsz;
struct qla_hw_data *ha = vha->hw;
if (!ha->flags.exchoffld_enabled)
@@ -4268,16 +4307,19 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return rval;
}
- qla2x00_number_of_exch(vha, &temp, max_cnt);
- temp *= size;
+ qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
+ ql_log(ql_log_info, vha, 0xd014,
+ "Actual exchange offload count: %d.\n", actual_cnt);
+
+ totsz = actual_cnt * size;
- if (temp != ha->exchoffld_size) {
+ if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
- ha->exchoffld_size = temp;
+ ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
- "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
- max_cnt, size, temp);
+ "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
+ max_cnt, actual_cnt, size, totsz);
ql_log(ql_log_info, vha, 0xd017,
"Exchange Buffers requested size = 0x%x\n",
@@ -4288,7 +4330,21 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
if (!ha->exchoffld_buf) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
- "Failed to allocate memory for exchoffld_buf_dma.\n");
+ "Failed to allocate memory for Exchange Offload.\n");
+
+ if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
+ ha->max_exchg -= REDUCE_EXCHANGES_CNT;
+ } else if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + 512)) {
+ ha->max_exchg -= 512;
+ } else {
+ ha->flags.exchoffld_enabled = 0;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Disabling Exchange offload due to lack of memory\n");
+ }
+ ha->exchoffld_size = 0;
+
return -ENOMEM;
}
}
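
The failure path above retries with a smaller pool instead of failing outright: each allocation failure shrinks ha->max_exchg (first by REDUCE_EXCHANGES_CNT, then by 512) and returns -ENOMEM so the caller can try again, until the count bottoms out near FW_DEF_EXCHANGES_CNT and exchange offload is disabled. A minimal sketch of the same back-off shape, with hypothetical names and step sizes:

```c
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical floor/steps standing in for FW_DEF_EXCHANGES_CNT etc. */
#define FLOOR_CNT   1024u
#define STEP_BIG    8192u
#define STEP_SMALL   512u

static int alloc_with_backoff(unsigned int *max_cnt, bool *enabled,
			      void *(*try_alloc)(size_t), size_t entry_sz)
{
	while (*enabled) {
		if (try_alloc(*max_cnt * entry_sz))
			return 0;			/* allocation succeeded */
		if (*max_cnt > FLOOR_CNT + STEP_BIG)
			*max_cnt -= STEP_BIG;		/* large first step */
		else if (*max_cnt > FLOOR_CNT + STEP_SMALL)
			*max_cnt -= STEP_SMALL;		/* finer second step */
		else
			*enabled = false;		/* give up on the feature */
	}
	return -ENOMEM;
}
```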
@@ -4514,6 +4570,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
INIT_LIST_HEAD(&vha->nvme_rport_list);
+ INIT_LIST_HEAD(&vha->gpnid_list);
+ INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -4531,6 +4589,19 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
return NULL;
}
+ /* todo: what about ext login? */
+ vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
+ vha->scan.l = vmalloc(vha->scan.size);
+ if (!vha->scan.l) {
+ ql_log(ql_log_fatal, vha, 0xd04a,
+ "Alloc failed for scan database.\n");
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
+ vha->gnl.l, vha->gnl.ldma);
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
+ INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
@@ -4566,15 +4637,18 @@ int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
unsigned long flags;
+ bool q = false;
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
+
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+
spin_unlock_irqrestore(&vha->work_lock, flags);
- if (QLA_EARLY_LINKUP(vha->hw))
- schedule_work(&vha->iocb_work);
- else
- qla2xxx_wake_dpc(vha);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
return QLA_SUCCESS;
}
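
The reworked qla2x00_post_work() queues the iocb work item at most once per idle period: the first poster after the list drains sets IOCB_WORK_ACTIVE under work_lock and does the queue_work(); later posters see the bit already set and only append to the list. A sketch of the idiom under assumed types — the matching worker is expected to clear the bit before draining, so a post that races with the drain re-queues the work:

```c
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define DEMO_WORK_ACTIVE 0	/* bit index, standing in for IOCB_WORK_ACTIVE */

struct demo_host {
	spinlock_t lock;
	struct list_head work_list;
	unsigned long flags;
	struct work_struct work;
	struct workqueue_struct *wq;
};

static void demo_post(struct demo_host *h, struct list_head *item)
{
	unsigned long irqflags;
	bool queue = false;

	spin_lock_irqsave(&h->lock, irqflags);
	list_add_tail(item, &h->work_list);
	/* only the first poster after an idle period queues the work */
	if (!test_and_set_bit(DEMO_WORK_ACTIVE, &h->flags))
		queue = true;
	spin_unlock_irqrestore(&h->lock, irqflags);

	if (queue)
		queue_work(h->wq, &h->work);
}
```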
@@ -4623,6 +4697,7 @@ int qla2x00_post_async_##name##_work( \
e->u.logio.data[0] = data[0]; \
e->u.logio.data[1] = data[1]; \
} \
+ fcport->flags |= FCF_ASYNC_ACTIVE; \
return qla2x00_post_work(vha, e); \
}
@@ -4631,6 +4706,8 @@ qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
+qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
+qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -4699,6 +4776,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
+ u64 wwn;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC enter\n",
+ __func__, __LINE__, e->u.new_sess.port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4706,6 +4788,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
if (pla) {
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ memcpy(fcport->node_name,
+ pla->iocb.u.isp24.u.plogi.node_name,
+ WWN_SIZE);
qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
/* we took an extra ref_count to prevent PLOGI ACK when
* fcport/sess has not been created.
@@ -4717,9 +4802,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (fcport) {
fcport->d_id = e->u.new_sess.id;
- fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI)
+ fcport->fc4_type = FC4_TYPE_FCP_SCSI;
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
@@ -4734,7 +4820,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- /* search again to make sure one else got ahead */
+ /* search again to make sure no one else got ahead */
tfcp = qla2x00_find_fcport_by_wwpn(vha,
e->u.new_sess.port_name, 1);
if (tfcp) {
@@ -4748,20 +4834,82 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
} else {
list_add_tail(&fcport->list, &vha->vp_fcports);
- if (pla) {
- qlt_plogi_ack_link(vha, pla, fcport,
- QLT_PLOGI_LINK_SAME_WWN);
- pla->ref_count--;
- }
+ }
+ if (pla) {
+ qlt_plogi_ack_link(vha, pla, fcport,
+ QLT_PLOGI_LINK_SAME_WWN);
+ pla->ref_count--;
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (pla)
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ fcport->id_changed = 1;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
+
+ if (pla) {
+ if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
+ u16 wd3_lo;
+
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->local = 0;
+ fcport->loop_id =
+ le16_to_cpu(
+ pla->iocb.u.isp24.nport_handle);
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ wd3_lo =
+ le16_to_cpu(
+ pla->iocb.u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ fcport->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ }
qlt_plogi_ack_unref(vha, pla);
- else
- qla24xx_async_gffid(vha, fcport);
+ } else {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ tfcp = qla2x00_find_fcport_by_nportid(vha,
+ &e->u.new_sess.id, 1);
+ if (tfcp && (tfcp != fcport)) {
+ /*
+ * We have a conflict fcport with same NportID.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC found conflict b4 add. DS %d LS %d\n",
+ __func__, tfcp->port_name, tfcp->disc_state,
+ tfcp->fw_login_state);
+
+ switch (tfcp->disc_state) {
+ case DSC_DELETED:
+ break;
+ case DSC_DELETE_PEND:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ break;
+ default:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ qlt_schedule_sess_for_deletion(tfcp);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ wwn = wwn_to_u64(fcport->node_name);
+
+ if (!wwn)
+ qla24xx_async_gnnid(vha, fcport);
+ else
+ qla24xx_async_gnl(vha, fcport);
+ }
}
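
In the PRLI branch above, service-parameter word 3 (wd3_lo) drives two settings: BIT_7 set means the peer supports confirmed completion, and BIT_4 distinguishes a target (set) from an initiator (clear). A standalone sketch of that decode, with the bit meanings taken from the code above:

```c
#include <stdbool.h>
#include <stdint.h>

struct prli_caps {
	bool conf_compl_supported;	/* wd3_lo bit 7 */
	bool is_target;			/* wd3_lo bit 4: set => target */
};

static struct prli_caps decode_wd3_lo(uint16_t wd3_lo)
{
	struct prli_caps caps = {
		.conf_compl_supported = wd3_lo & (1u << 7),
		.is_target            = wd3_lo & (1u << 4),
	};
	return caps;
}
```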
if (free_fcport) {
@@ -4771,6 +4919,20 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
}
+static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ struct srb *sp = e->u.iosb.sp;
+ int rval;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "%s: %s: Re-issue IOCB failed (%d).\n",
+ __func__, sp->name, rval);
+ qla24xx_sp_unmap(vha, sp);
+ }
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -4824,8 +4986,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
- case QLA_EVT_GPNID_DONE:
- qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+ case QLA_EVT_UNMAP:
+ qla24xx_sp_unmap(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_RELOGIN:
+ qla2x00_relogin(vha);
break;
case QLA_EVT_NEW_SESS:
qla24xx_create_new_sess(vha, e);
@@ -4849,6 +5014,30 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_NACK:
qla24xx_do_nack_work(vha, e);
break;
+ case QLA_EVT_ASYNC_PRLO:
+ qla2x00_async_prlo(vha, e->u.logio.fcport);
+ break;
+ case QLA_EVT_ASYNC_PRLO_DONE:
+ qla2x00_async_prlo_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_GPNFT:
+ qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
+ break;
+ case QLA_EVT_GPNFT_DONE:
+ qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNFT_DONE:
+ qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNID:
+ qla24xx_async_gnnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GFPNID:
+ qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_SP_RETRY:
+ qla_sp_retry(vha, e);
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4858,6 +5047,20 @@ qla2x00_do_work(struct scsi_qla_host *vha)
}
}
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
+
+ if (!e) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ return qla2x00_post_work(vha, e);
+}
+
/* Relogins all the fcports of a vport
* Context: dpc thread
*/
@@ -4868,14 +5071,14 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- /*
- * If the port is not ONLINE then try to login
- * to it if we haven't run out of retries.
- */
+ /*
+ * If the port is not ONLINE then try to login
+ * to it if we haven't run out of retries.
+ */
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
- fcport->login_retry--;
- if (fcport->flags & FCF_FABRIC_DEVICE) {
+ fcport->login_retry &&
+ !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
"%s %8phC DS %d LS %d\n", __func__,
fcport->port_name, fcport->disc_state,
@@ -4884,7 +5087,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
ea.event = FCME_RELOGIN;
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
- } else {
+ } else if (vha->hw->current_topology == ISP_CFG_NL) {
+ fcport->login_retry--;
status = qla2x00_local_device_login(vha,
fcport);
if (status == QLA_SUCCESS) {
@@ -4912,6 +5116,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+
+ ql_dbg(ql_dbg_disc, vha, 0x400e,
+ "Relogin end.\n");
}
/* Schedule work on any of the dpc-workqueues */
@@ -5687,8 +5894,6 @@ qla2x00_do_dpc(void *data)
if (test_bit(UNLOADING, &base_vha->dpc_flags))
break;
- qla2x00_do_work(base_vha);
-
if (IS_P3P_TYPE(ha)) {
if (IS_QLA8044(ha)) {
if (test_and_clear_bit(ISP_UNRECOVERABLE,
@@ -5867,16 +6072,19 @@ qla2x00_do_dpc(void *data)
}
/* Retry each device up to login retry count */
- if ((test_and_clear_bit(RELOGIN_NEEDED,
- &base_vha->dpc_flags)) &&
+ if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
- "Relogin scheduled.\n");
- qla2x00_relogin(base_vha);
- ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
- "Relogin end.\n");
+ if (!base_vha->relogin_jif ||
+ time_after_eq(jiffies, base_vha->relogin_jif)) {
+ base_vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
+
+ ql_dbg(ql_dbg_disc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
+ qla24xx_post_relogin_work(base_vha);
+ }
}
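
The relogin block now rate-limits itself: relogin_jif holds the earliest jiffies value at which the next pass may run, and RELOGIN_NEEDED is cleared only when a pass is actually posted, so requests arriving inside the one-second window are coalesced rather than dropped. The gate reduces to this shape (field name follows the code above):

```c
#include <linux/jiffies.h>

/* Returns true when a relogin pass may be posted; opens the next 1s window. */
static bool relogin_window_open(unsigned long *relogin_jif)
{
	if (*relogin_jif && !time_after_eq(jiffies, *relogin_jif))
		return false;		/* still inside the current window */
	*relogin_jif = jiffies + HZ;
	return true;
}
```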
loop_resync_check:
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
@@ -6135,8 +6343,17 @@ qla2x00_timer(struct timer_list *t)
}
/* Process any deferred work. */
- if (!list_empty(&vha->work_list))
- start_dpc++;
+ if (!list_empty(&vha->work_list)) {
+ unsigned long flags;
+ bool q = false;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
+ }
/*
* FC-NVME
@@ -6580,37 +6797,16 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
-static void
-qla83xx_disable_laser(scsi_qla_host_t *vha)
-{
- uint32_t reg, data, fn;
- struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
-
- /* pci func #/port # */
- ql_dbg(ql_dbg_init, vha, 0x004b,
- "Disabling Laser for hba: %p\n", vha);
-
- fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
- (BIT_15|BIT_14|BIT_13|BIT_12));
-
- fn = (fn >> 12);
-
- if (fn & 1)
- reg = PORT_1_2031;
- else
- reg = PORT_0_2031;
-
- data = LASER_OFF_2031;
-
- qla83xx_wr_reg(vha, reg, data);
-}
-
static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
+ int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
- return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ if (USER_CTRL_IRQ(vha->hw))
+ rc = blk_mq_map_queues(&shost->tag_set);
+ else
+ rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ return rc;
}
static const struct pci_error_handlers qla2xxx_err_handler = {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b4336e0cd85f..d2db86ea06b2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2461,6 +2461,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
sec_mask = 0x1e000;
break;
}
+ /* fall through */
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 18069edd4773..fc89af8fe256 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -75,7 +75,8 @@ MODULE_PARM_DESC(ql2xuctrlirq,
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
-static int temp_sam_status = SAM_STAT_BUSY;
+static int qla_sam_status = SAM_STAT_BUSY;
+static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
/*
* From scsi/fc/fc_fcp.h
@@ -208,7 +209,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
return host;
@@ -309,17 +310,17 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
- ql_dbg(ql_dbg_async, vha, 0x502f,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
} else if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0x503a,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
&u->atio, ha_locked, 0);
} else {
- ql_dbg(ql_dbg_async, vha, 0x503d,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
@@ -450,6 +451,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
+ /* fall through */
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -606,7 +608,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
__func__, __LINE__,
sp->fcport->port_name,
vha->fcport_count);
-
+ sp->fcport->disc_state = DSC_UPD_FCPORT;
qla24xx_post_upd_fcport_work(vha, sp->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20f5,
@@ -665,7 +667,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
-
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_nack_sp_done;
rval = qla2x00_start_sp(sp);
@@ -861,7 +863,10 @@ void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
fcport->d_id = port_id;
- qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ else
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
@@ -890,6 +895,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
pla->ref_count, pla, link);
+ if (link == QLT_PLOGI_LINK_CONFLICT) {
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ pla->ref_count--;
+ return;
+ default:
+ break;
+ }
+ }
+
if (sess->plogi_link[link])
qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
@@ -954,8 +970,9 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- struct event_arg ea;
scsi_qla_host_t *base_vha;
+ struct qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
@@ -971,19 +988,35 @@ static void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ sess->send_els_logo = 0;
qlt_send_first_logo(vha, &logo);
}
- if (sess->logout_on_delete) {
+ if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
int rc;
- rc = qla2x00_post_async_logout_work(vha, sess, NULL);
- if (rc != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0xf085,
- "Schedule logo failed sess %p rc %d\n",
- sess, rc);
- else
- logout_started = true;
+ if (!own ||
+ (own &&
+ (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ rc = qla2x00_post_async_logout_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ } else if (own && (own->iocb.u.isp24.status_subcode ==
+ ELS_PRLI) && ha->flags.rida_fmt2) {
+ rc = qla2x00_post_async_prlo_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule PRLO failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
}
}
@@ -1007,7 +1040,7 @@ static void qlt_free_session_done(struct work_struct *work)
}
ql_dbg(ql_dbg_disc, vha, 0xf087,
- "%s: sess %p logout completed\n",__func__, sess);
+ "%s: sess %p logout completed\n", __func__, sess);
}
if (sess->logo_ack_needed) {
@@ -1033,8 +1066,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->login_succ = 0;
}
- if (sess->chip_reset != ha->base_qpair->chip_reset)
- qla2x00_clear_loop_id(sess);
+ qla2x00_clear_loop_id(sess);
if (sess->conflict) {
sess->conflict->login_pause = 0;
@@ -1044,8 +1076,6 @@ static void qlt_free_session_done(struct work_struct *work)
}
{
- struct qlt_plogi_ack_t *own =
- sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
@@ -1076,6 +1106,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
}
}
+
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
@@ -1089,14 +1120,24 @@ static void qlt_free_session_done(struct work_struct *work)
wake_up_all(&vha->fcport_waitQ);
base_vha = pci_get_drvdata(ha->pdev);
+
+ sess->free_pending = 0;
+
if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
return;
- if (!tgt || !tgt->tgt_stop) {
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_DELETE_DONE;
- ea.fcport = sess;
- qla2x00_fcport_event_handler(vha, &ea);
+ if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+ case MODE_TARGET:
+ default:
+ /* no-op */
+ break;
+ }
}
}
@@ -1104,11 +1145,20 @@ static void qlt_free_session_done(struct work_struct *work)
void qlt_unreg_sess(struct fc_port *sess)
{
struct scsi_qla_host *vha = sess->vha;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
"%s sess %p for deletion %8phC\n",
__func__, sess, sess->port_name);
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->free_pending) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
+ sess->free_pending = 1;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
@@ -1175,10 +1225,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
}
/* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_schedule_sess_for_deletion(struct fc_port *sess,
- bool immediate)
+void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
+ unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
return;
@@ -1194,27 +1244,28 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
return;
}
- sess->disc_state = DSC_DELETE_PEND;
-
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
+ sess->disc_state = DSC_DELETE_PEND;
+
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- schedule_work(&sess->del_work);
-}
-
-void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
-{
- unsigned long flags;
- struct qla_hw_data *ha = sess->vha->hw;
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_schedule_sess_for_deletion(sess, 1);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ /* use cancel to push work element through before re-queue */
+ cancel_work_sync(&sess->del_work);
+ INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
+ queue_work(sess->vha->hw->wq, &sess->del_work);
}
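
qlt_schedule_sess_for_deletion() is now the single entry point (the _lock wrapper is gone) and is idempotent: the deleted flag is tested and set under work_lock so concurrent callers schedule at most one deletion, and cancel_work_sync() flushes any previously queued instance before the re-queue. The latch-then-queue core, sketched with hypothetical types:

```c
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_sess {
	spinlock_t lock;
	bool delete_pending;		/* latch, like the deleted flag above */
	struct work_struct del_work;
	struct workqueue_struct *wq;
};

static void demo_schedule_delete_once(struct demo_sess *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (s->delete_pending) {	/* already scheduled by someone else */
		spin_unlock_irqrestore(&s->lock, flags);
		return;
	}
	s->delete_pending = true;
	spin_unlock_irqrestore(&s->lock, flags);

	cancel_work_sync(&s->del_work);	/* flush a stale queued instance */
	queue_work(s->wq, &s->del_work);
}
```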
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -1225,7 +1276,7 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
list_for_each_entry(sess, &vha->vp_fcports, list) {
if (sess->se_sess)
- qlt_schedule_sess_for_deletion(sess, 1);
+ qlt_schedule_sess_for_deletion(sess);
}
/* At this point tgt could be already dead */
@@ -1400,7 +1451,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
sess->local = 1;
- qlt_schedule_sess_for_deletion(sess, false);
+ qlt_schedule_sess_for_deletion(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -1560,8 +1611,11 @@ static void qlt_release(struct qla_tgt *tgt)
btree_destroy64(&tgt->lun_qpair_map);
- if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
- ha->tgt.tgt_ops->remove_target(vha);
+ if (vha->vp_idx)
+ if (ha->tgt.tgt_ops &&
+ ha->tgt.tgt_ops->remove_target &&
+ vha->vha_tgt.target_lport_ptr)
+ ha->tgt.tgt_ops->remove_target(vha);
vha->vha_tgt.qla_tgt = NULL;
@@ -1976,15 +2030,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
"qla_target(%d): task abort for non-existant session\n",
vha->vp_idx);
- rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
- QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
-
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- if (rc != 0) {
- qlt_24xx_send_abts_resp(ha->base_qpair, abts,
- FCP_TMF_REJECTED, false);
- }
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2174,7 +2223,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
- qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+ qlt_schedule_sess_for_deletion(mcmd->sess);
} else {
qlt_send_notify_ack(vha->hw->base_qpair,
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
@@ -3708,7 +3757,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
term = 1;
if (term)
- qlt_term_ctio_exchange(qpair, ctio, cmd, status);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3869,7 +3918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
"%s %d %8phC post del sess\n",
__func__, __LINE__, cmd->sess->port_name);
- qlt_schedule_sess_for_deletion_lock(cmd->sess);
+ qlt_schedule_sess_for_deletion(cmd->sess);
}
break;
}
@@ -4204,76 +4253,6 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
return cmd;
}
-static void qlt_create_sess_from_atio(struct work_struct *work)
-{
- struct qla_tgt_sess_op *op = container_of(work,
- struct qla_tgt_sess_op, work);
- scsi_qla_host_t *vha = op->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
- uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_del(&op->cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-
- if (op->aborted) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
- "sess_op with tag %u is aborted\n",
- op->atio.u.isp24.exchange_addr);
- goto out_term;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
- if (op->atio.u.raw.entry_count > 1) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
- goto out_term;
- }
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has an extra creation ref. */
-
- if (!sess)
- goto out_term;
- /*
- * Now obtain a pre-allocated session tag using the original op->atio
- * packet header, and dispatch into __qlt_do_work() using the existing
- * process context.
- */
- cmd = qlt_get_tag(vha, sess, &op->atio);
- if (!cmd) {
- struct qla_qpair *qpair = ha->base_qpair;
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- kfree(op);
- return;
- }
-
- /*
- * __qlt_do_work() will call qlt_put_sess() to release
- * the extra reference taken above by qlt_make_local_sess()
- */
- __qlt_do_work(cmd);
- kfree(op);
- return;
-out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
- kfree(op);
-}
-
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
@@ -4283,31 +4262,23 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct fc_port *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
+ port_id_t id;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while device %p is shutting down\n", tgt);
- return -EFAULT;
+ return -ENODEV;
}
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
- if (unlikely(!sess)) {
- struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
- GFP_ATOMIC);
- if (!op)
- return -ENOMEM;
-
- memcpy(&op->atio, atio, sizeof(*atio));
- op->vha = vha;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
+ id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
+ id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+ if (IS_SW_RESV_ADDR(id))
+ return -EBUSY;
- INIT_WORK(&op->work, qlt_create_sess_from_atio);
- queue_work(qla_tgt_wq, &op->work);
- return 0;
- }
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+ if (unlikely(!sess))
+ return -EFAULT;
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
@@ -4336,7 +4307,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return -ENOMEM;
+ return -EBUSY;
}
cmd->cmd_in_wq = 1;
@@ -4417,14 +4388,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt;
struct fc_port *sess;
u64 unpacked_lun;
int fn;
unsigned long flags;
- tgt = vha->vha_tgt.qla_tgt;
-
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4435,15 +4403,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
unpacked_lun =
scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
- if (!sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
- "qla_target(%d): task mgmt fn 0x%x for "
- "non-existant session\n", vha->vp_idx, fn);
- return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
- sizeof(struct atio_from_isp));
- }
-
- if (sess->deleted)
+ if (sess == NULL || sess->deleted)
return -EFAULT;
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4574,7 +4534,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* might have cleared it when requested this session
* deletion, so don't touch it
*/
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
} else {
/*
* Another wwn used to have our s_id/loop_id
@@ -4584,11 +4544,10 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
-
other_sess->keep_nport_handle = 1;
- *conflict_sess = other_sess;
- qlt_schedule_sess_for_deletion(other_sess,
- true);
+ if (other_sess->disc_state != DSC_DELETED)
+ *conflict_sess = other_sess;
+ qlt_schedule_sess_for_deletion(other_sess);
}
continue;
}
@@ -4602,7 +4561,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* Same loop_id but different s_id
* Ok to kill and logout */
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
}
}
@@ -4652,6 +4611,138 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
return count;
}
+static int qlt_handle_login(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct fc_port *sess = NULL, *conflict_sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id, wd3_lo;
+ int res = 0;
+ struct qlt_plogi_ack_t *pla;
+ unsigned long flags;
+
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ /* Mark all stale commands sitting in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ sess = qlt_find_sess_invalidate_other(vha, wwn,
+ port_id, loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
+ goto out;
+ }
+
+ pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+ if (!pla) {
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
+ if (conflict_sess) {
+ conflict_sess->login_gen++;
+ qlt_plogi_ack_link(vha, pla, conflict_sess,
+ QLT_PLOGI_LINK_CONFLICT);
+ }
+
+ if (!sess) {
+ pla->ref_count++;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name,
+ iocb->u.isp24.u.plogi.node_name,
+ pla, FC4_TYPE_UNKNOWN);
+ else
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name, NULL,
+ pla, FC4_TYPE_UNKNOWN);
+
+ goto out;
+ }
+
+ qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
+ sess->d_id = port_id;
+ sess->login_gen++;
+
+ if (iocb->u.isp24.status_subcode == ELS_PRLI) {
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ sess->d_id = port_id;
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ sess->port_type = FCT_INITIATOR;
+ else
+ sess->port_type = FCT_TARGET;
+
+ } else
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
+
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__, sess->port_name, sess->disc_state);
+
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+ break;
+
+ default:
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+ * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->d_id.b24 == port_id.b24));
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
+
+
+ qlt_schedule_sess_for_deletion(sess);
+ break;
+ }
+out:
+ return res;
+}
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
@@ -4666,7 +4757,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
- struct qlt_plogi_ack_t *pla;
unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4690,88 +4780,32 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
*/
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
+ res = qlt_handle_login(vha, iocb);
+ break;
- /* Mark all stale commands in qla_tgt_wq for deletion */
- abort_cmds_for_s_id(vha, &port_id);
-
- if (wwn) {
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(vha, wwn,
- port_id, loop_id, &conflict_sess);
- spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
- }
-
- if (IS_SW_RESV_ADDR(port_id)) {
- res = 1;
- break;
- }
-
- pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
- if (!pla) {
- qlt_send_term_imm_notif(vha, iocb, 1);
- break;
- }
-
- res = 0;
+ case ELS_PRLI:
+ if (N2N_TOPO(ha)) {
+ sess = qla2x00_find_fcport_by_wwpn(vha,
+ iocb->u.isp24.port_name, 1);
- if (conflict_sess) {
- conflict_sess->login_gen++;
- qlt_plogi_ack_link(vha, pla, conflict_sess,
- QLT_PLOGI_LINK_CONFLICT);
- }
+ if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
- if (!sess) {
- pla->ref_count++;
- qla24xx_post_newsess_work(vha, &port_id,
- iocb->u.isp24.port_name, pla);
- res = 0;
+ res = qlt_handle_login(vha, iocb);
break;
}
- qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
- sess->fw_login_state = DSC_LS_PLOGI_PEND;
- sess->d_id = port_id;
- sess->login_gen++;
-
- switch (sess->disc_state) {
- case DSC_DELETED:
- qlt_plogi_ack_unref(vha, pla);
- break;
-
- default:
- /*
- * Under normal circumstances we want to release nport handle
- * during LOGO process to avoid nport handle leaks inside FW.
- * The exception is when LOGO is done while another PLOGI with
- * the same nport handle is waiting as might be the case here.
- * Note: there is always a possibily of a race where session
- * deletion has already started for other reasons (e.g. ACL
- * removal) and now PLOGI arrives:
- * 1. if PLOGI arrived in FW after nport handle has been freed,
- * FW must have assigned this PLOGI a new/same handle and we
- * can proceed ACK'ing it as usual when session deletion
- * completes.
- * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
- * bit reached it, the handle has now been released. We'll
- * get an error when we ACK this PLOGI. Nothing will be sent
- * back to initiator. Initiator should eventually retry
- * PLOGI and situation will correct itself.
- */
- sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
- (sess->d_id.b24 == port_id.b24));
-
- ql_dbg(ql_dbg_disc, vha, 0x20f9,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, sess->port_name);
-
-
- qlt_schedule_sess_for_deletion_lock(sess);
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
break;
}
- break;
-
- case ELS_PRLI:
wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
if (wwn) {
@@ -4782,17 +4816,51 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (conflict_sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
- "PRLI with conflicting sess %p port %8phC\n",
- conflict_sess, conflict_sess->port_name);
- qlt_send_term_imm_notif(vha, iocb, 1);
- res = 0;
- break;
+ switch (conflict_sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+ "PRLI with conflicting sess %p port %8phC\n",
+ conflict_sess, conflict_sess->port_name);
+ conflict_sess->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
}
if (sess != NULL) {
- if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
- sess->fw_login_state != DSC_LS_PLOGI_COMP) {
+ bool delete = false;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+ switch (sess->fw_login_state) {
+ case DSC_LS_PLOGI_PEND:
+ case DSC_LS_PLOGI_COMP:
+ case DSC_LS_PRLI_COMP:
+ break;
+ default:
+ delete = true;
+ break;
+ }
+
+ switch (sess->disc_state) {
+ case DSC_LOGIN_PEND:
+ case DSC_GPDB:
+ case DSC_GPSC:
+ case DSC_UPD_FCPORT:
+ case DSC_LOGIN_COMPLETE:
+ case DSC_ADISC:
+ delete = false;
+ break;
+ default:
+ break;
+ }
+
+ if (delete) {
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4803,6 +4871,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
+ flags);
break;
}
@@ -4826,6 +4896,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->port_type = FCT_INITIATOR;
else
sess->port_type = FCT_TARGET;
+
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
res = 1; /* send notify ack */
@@ -4863,7 +4935,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 1;
break;
}
- /* drop through */
+ /* fall through */
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4892,7 +4964,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
} else {
/* cmd did not go to upper layer. */
if (sess) {
- qlt_schedule_sess_for_deletion_lock(sess);
+ qlt_schedule_sess_for_deletion(sess);
res = 0;
}
/* else logo will be ack */
@@ -4930,6 +5002,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ ql_dbg(ql_dbg_disc, vha, 0xf026,
+ "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
+ vha->vp_idx, iocb->u.isp24.status_subcode, res);
+
return res;
}
@@ -5320,7 +5396,6 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
- uint16_t status;
unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
@@ -5328,8 +5403,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- status = temp_sam_status;
- qlt_send_busy(qpair, atio, status);
+ qlt_send_busy(qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5344,7 +5418,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
- unsigned long flags;
+ unsigned long flags = 0;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0x3064,
@@ -5368,8 +5442,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_TASK_SET_FULL);
+ qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
@@ -5388,42 +5461,37 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_task_mgmt(vha, atio);
}
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
- if (!ha_locked)
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
-
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_BUSY);
-#else
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
atio, 1, 0);
-#endif
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe059,
- "qla_target: Unable to send "
- "command to target for req, "
- "ignoring.\n");
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe05a,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status.\n", vha->vp_idx);
- if (!ha_locked)
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair,
- atio, SAM_STAT_BUSY);
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ qla_sam_status);
+ break;
}
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
}
break;
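
The old -ESRCH special case gives way to distinct errno values, each mapped to its own recovery action by the switch above: -ENODEV (target shutting down) is logged and dropped, -EBADF terminates the exchange, -EBUSY reports TASK SET FULL via tc_sam_status, and everything else (including -EFAULT from a missing session) reports BUSY via qla_sam_status. The mapping, restated as a sketch:

```c
#include <errno.h>

enum atio_fail_action {
	DROP_SILENTLY,		/* -ENODEV: target is stopping      */
	TERM_EXCHANGE,		/* -EBADF: tear down the exchange   */
	SEND_TASK_SET_FULL,	/* -EBUSY: tc_sam_status            */
	SEND_BUSY,		/* all other errors: qla_sam_status */
};

static enum atio_fail_action classify_atio_error(int rc)
{
	switch (rc) {
	case -ENODEV:	return DROP_SILENTLY;
	case -EBADF:	return TERM_EXCHANGE;
	case -EBUSY:	return SEND_TASK_SET_FULL;
	default:	return SEND_BUSY;
	}
}
```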
@@ -5506,27 +5574,31 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(rsp->qpair, atio, 0);
-#else
- qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
-#endif
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe05f,
- "qla_target: Unable to send "
- "command to target, sending TERM "
- "EXCHANGE for rsp\n");
- qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe060,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status\n", vha->vp_idx);
- qlt_send_busy(rsp->qpair, atio, 0);
- }
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
+ qlt_send_term_exchange(rsp->qpair, NULL,
+ atio, 1, 0);
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ qla_sam_status);
+ break;
}
}
}
@@ -5755,7 +5827,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
unsigned long flags;
u8 newfcport = 0;
- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed",
@@ -5784,6 +5856,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
tfcp->port_type = fcport->port_type;
tfcp->supported_classes = fcport->supported_classes;
tfcp->flags |= fcport->flags;
+ tfcp->scan_state = QLA_FCPORT_FOUND;
del = fcport;
fcport = tfcp;
@@ -6445,18 +6518,21 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
qlt_add_target(ha, vha);
}
-void
-qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+u8
+qlt_rff_id(struct scsi_qla_host *vha)
{
+ u8 fc4_feature = 0;
/*
* FC-4 Feature bit 0 indicates target functionality to the name server.
*/
if (qla_tgt_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_0;
+ fc4_feature = BIT_0;
} else if (qla_ini_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ fc4_feature = BIT_1;
} else if (qla_dual_mode_enabled(vha))
- ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ fc4_feature = BIT_0 | BIT_1;
+
+ return fc4_feature;
}
/*
@@ -6546,7 +6622,9 @@ void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb;
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6554,19 +6632,28 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- icb = (struct init_cb_24xx *)ha->init_cb;
-
- if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
- struct qla_msix_entry *msix = &ha->msix_entries[2];
-
- icb->msix_atio = cpu_to_le16(msix->entry);
- ql_dbg(ql_dbg_init, vha, 0xf072,
- "Registering ICB vector 0x%x for atio que.\n",
- msix->entry);
- } else if (ql2xenablemsix == 0) {
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- ql_dbg(ql_dbg_init, vha, 0xf07f,
- "Registering INTx vector for ATIO.\n");
+ if (ha->flags.msix_enabled) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA2071(ha)) {
+ /* 4 ports Baker: Enable Interrupt Handshake */
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ } else {
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= ~BIT_26;
+ }
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
+ }
+ } else {
+ /* INTx|MSI */
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "%s: Use INTx for ATIOQ.\n", __func__);
+ }
}
}
@@ -6574,6 +6661,7 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6625,6 +6713,14 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ if (IS_QLA25XX(ha)) {
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
+ }
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6679,6 +6775,7 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6729,6 +6826,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
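
Both stage-1 NVRAM hunks apply the same read-modify-write: clear the three connection-option bits (BIT_4..BIT_6) in firmware_options_2 and write the P2P encoding into the field, turning Loop-preferred into point-to-point. A sketch of the equivalent update on a little-endian option word — assuming P2P encodes as 1 in the three-bit field at bit 4; the driver applies the mask with per-step cpu_to_le32(), which comes to the same thing for a constant mask:

```c
#include <linux/types.h>
#include <asm/byteorder.h>

#define CONN_MODE_SHIFT	4
#define CONN_MODE_MASK	(0x7u << CONN_MODE_SHIFT)	/* bits 6:4 */
#define CONN_MODE_P2P	(0x1u << CONN_MODE_SHIFT)	/* P2P value assumed */

static void force_p2p(__le32 *opts)
{
	u32 v = le32_to_cpu(*opts);

	v = (v & ~CONN_MODE_MASK) | CONN_MODE_P2P;	/* drop Loop-preferred */
	*opts = cpu_to_le32(v);
}
```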
@@ -6991,20 +7094,14 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
- unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
if (!vha->d_id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
} else if (vha->d_id.b24 != id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index aba58d3848a6..bb67b5a284a8 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -993,7 +993,7 @@ struct qla_tgt_prm {
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
- ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+ ((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
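
The reserved-address test widens from the single domain-controller block (domain 0xFF, area 0xFC) to any area 0xF0-0xFF under domain 0xFF, which also covers the fabric well-known addresses FFFFF0h-FFFFFEh (management server, name server, fabric controller, F_Port, and the rest). A quick stand-alone check of the new predicate:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_sw_resv(uint32_t d_id)	/* 24-bit FC destination ID */
{
	uint8_t domain = (d_id >> 16) & 0xff;
	uint8_t area   = (d_id >>  8) & 0xff;

	return domain == 0xff && (area & 0xf0) == 0xf0;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_sw_resv(0xfffc01),	/* domain controller -> 1 */
	       is_sw_resv(0xfffffc),	/* name server       -> 1 */
	       is_sw_resv(0xff0a01));	/* ordinary N_Port   -> 0 */
	return 0;
}
```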
#define QLA_TGT_XMIT_DATA 1
#define QLA_TGT_XMIT_STATUS 2
@@ -1072,7 +1072,7 @@ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
-extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern u8 qlt_rff_id(struct scsi_qla_host *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 733e8dcccf5c..731ca0d8520a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -526,7 +526,8 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20c,
"%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
- if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ switch (ent->t268.buf_type) {
+ case T268_BUF_TYPE_EXTD_TRACE:
if (vha->hw->eft) {
if (buf) {
ent->t268.buf_size = EFT_SIZE;
@@ -538,10 +539,43 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
"%s: missing eft\n", __func__);
qla27xx_skip_entry(ent, buf);
}
- } else {
- ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ break;
+ case T268_BUF_TYPE_EXCH_BUFOFF:
+ if (vha->hw->exchoffld_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exchoffld_size;
+ ent->t268.start_addr =
+ vha->hw->exchoffld_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exchoffld_buf,
+ vha->hw->exchoffld_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing exch offld\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+ case T268_BUF_TYPE_EXTD_LOGIN:
+ if (vha->hw->exlogin_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exlogin_size;
+ ent->t268.start_addr =
+ vha->hw->exlogin_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exlogin_buf,
+ vha->hw->exlogin_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing ext login\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
qla27xx_skip_entry(ent, buf);
+ break;
}
return false;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b6ec02b96d3d..549bef9afddd 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.02-k"
+#define QLA2XXX_VERSION "10.00.00.05-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3f82ea1b72dc..aadfeaac3898 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1635,16 +1635,13 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return rc;
}
- lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
- 65536);
+ lport->lport_loopid_map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
if (!lport->lport_loopid_map) {
pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
- memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
- * 65536);
pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
return 0;
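
The remaining hunks below are mechanical conversions of allocate-then-zero pairs to their zeroing variants: vmalloc()+memset() becomes vzalloc(), and dma_alloc_coherent()+memset() becomes dma_zalloc_coherent(), with no behavioral change. The shape of the conversion, as a kernel-context sketch:

```c
#include <linux/vmalloc.h>

static void *zeroed_table(unsigned long size)
{
	/*
	 * before:
	 *	p = vmalloc(size);
	 *	if (p)
	 *		memset(p, 0, size);
	 */
	return vzalloc(size);	/* after: one call, memory already zeroed */
}
```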
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 5d6d158bbfd6..52b1a0bc93c9 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,15 +153,14 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
goto exit_get_sys_info_no_free;
}
- memset(sys_info, 0, sizeof(*sys_info));
/* Get flash sys info */
if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 1da04f323d38..bda2e64ee5ca 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,15 +625,14 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
goto exit_init_fw_cb_no_free;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
@@ -710,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
@@ -720,7 +719,6 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
@@ -1342,16 +1340,15 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
return status;
}
- memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e91abb327745..968bd85610f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4050,15 +4050,14 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
return status;
}
- memset(sys_info, 0, sizeof(*sys_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2b8a8ce2a431..82e889bbe0ed 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2689,16 +2689,15 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
return -ENOMEM;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
@@ -4196,15 +4195,14 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
goto mem_alloc_error_exit;
}
- memset(ha->queues, 0, ha->queues_len);
/*
* As per RISC alignment requirements -- the bus-address must be a
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 40bc616cf8ab..90349498f686 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -12,7 +12,7 @@
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
- * encouraged once assigned by ANSI/INCITS T10
+ * encouraged once assigned by ANSI/INCITS T10).
*/
static const char *const scsi_device_types[] = {
"Direct-Access ",
@@ -39,7 +39,7 @@ static const char *const scsi_device_types[] = {
};
/**
- * scsi_device_type - Return 17 char string indicating device type.
+ * scsi_device_type - Return 17-char string indicating device type.
* @type: type number to look up
*/
const char *scsi_device_type(unsigned type)
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(scsi_device_type);
* @scsilun: struct scsi_lun to be converted.
*
* Description:
- * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
+ * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered
* integer, and return the result. The caller must check for
* truncation before using this function.
*
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(scsilun_to_int);
* back into the lun value.
*
* Notes:
- * Given an integer : 0x0b03d204, this function returns a
+ * Given an integer: 0x0b03d204, this function returns a
* struct scsi_lun of: d2 04 0b 03 00 00 00 00
*
*/
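The worked example in this comment can be checked directly: each 16-bit level of the 64-bit LUN value is stored big-endian into successive byte pairs, starting from the low 16 bits. A standalone sketch of the conversion the comment describes (the in-kernel implementation is int_to_scsilun() in this same file):

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* Illustrative re-implementation, not the kernel function itself. */
        static void demo_int_to_lun(uint64_t lun, uint8_t out[8])
        {
                int i;

                memset(out, 0, 8);
                for (i = 0; i < 8; i += 2) {
                        out[i] = (lun >> 8) & 0xff;     /* high byte of this level */
                        out[i + 1] = lun & 0xff;        /* low byte of this level */
                        lun >>= 16;                     /* next addressing level */
                }
        }

        int main(void)
        {
                uint8_t b[8];
                int i;

                demo_int_to_lun(0x0b03d204, b);
                for (i = 0; i < 8; i++)
                        printf("%02x ", b[i]);  /* d2 04 0b 03 00 00 00 00 */
                printf("\n");
                return 0;
        }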
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
/**
* scsi_build_sense_buffer - build sense data in a buffer
- * @desc: Sense format (non zero == descriptor format,
+ * @desc: Sense format (non-zero == descriptor format,
* 0 == fixed format)
* @buf: Where to build sense data
* @key: Sense key
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(scsi_build_sense_buffer);
* @info: 64-bit information value to be set
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
**/
int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
{
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(scsi_set_sense_information);
* @cd: command/data bit
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
*/
int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e4f037f0f38b..a5986dae9020 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,7 +6,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2016 Douglas Gilbert
+ * Copyright (C) 2001 - 2017 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -61,8 +61,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "1.86"
-static const char *sdebug_version_date = "20160430";
+#define SDEBUG_VERSION "0187" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20171202";
#define MY_NAME "scsi_debug"
@@ -93,6 +93,7 @@ static const char *sdebug_version_date = "20160430";
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
+#define WRITE_ERROR_ASC 0xc
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,6 +106,7 @@ static const char *sdebug_version_date = "20160430";
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
#define DEF_ATO 1
+#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
@@ -161,12 +163,14 @@ static const char *sdebug_version_date = "20160430";
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
- SDEBUG_OPT_SHORT_TRANSFER)
+ SDEBUG_OPT_SHORT_TRANSFER | \
+ SDEBUG_OPT_HOST_BUSY)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -232,7 +236,7 @@ static const char *sdebug_version_date = "20160430";
#define F_M_ACCESS 0x800 /* media access */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
-#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define SDEBUG_MAX_PARTS 4
@@ -263,12 +267,18 @@ struct sdebug_host_info {
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
+enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
+ SDEB_DEFER_WQ = 2};
+
struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
int sqa_idx; /* index of sdebug_queue array */
int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
int issuing_cpu;
+ bool init_hrt;
+ bool init_wq;
+ enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
@@ -282,6 +292,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dif:1;
unsigned int inj_dix:1;
unsigned int inj_short:1;
+ unsigned int inj_host_busy:1;
};
struct sdebug_queue {
@@ -304,8 +315,8 @@ struct opcode_info_t {
u32 flags; /* OR-ed set of SDEB_F_* */
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
const struct opcode_info_t *arrp; /* num_attached elements or NULL */
- u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
- /* ignore cdb bytes after position 15 */
+ u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
+ /* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
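The reworded len_mask comment is worth unpacking: byte 0 holds the cdb length, bytes 1 through min(cdb_len, 15) are bitmasks of the cdb bits an initiator may legitimately set, and anything beyond byte 15 is not checked. A sketch of the kind of strict-mode validation this table enables (illustrative; the driver's actual check lives in its queuecommand path):

        /* Returns false if any reserved cdb bit is set. Illustrative only. */
        static bool cdb_bits_ok(const u8 *cdb, const u8 len_mask[16])
        {
                int k, len = len_mask[0];

                for (k = 1; k < len && k < 16; ++k) {
                        if (cdb[k] & ~len_mask[k])
                                return false;   /* bit set outside the mask */
                }
                return true;
        }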
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
@@ -322,12 +333,12 @@ enum sdeb_opcode_index {
SDEB_I_READ = 9, /* 6, 10, 12, 16 */
SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
SDEB_I_START_STOP = 11,
- SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
- SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
+ SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
+ SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
SDEB_I_VERIFY = 16, /* 10 only */
- SDEB_I_VARIABLE_LEN = 17,
+ SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
@@ -340,7 +351,7 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 27, /* 10, 16 */
SDEB_I_SYNC_CACHE = 28, /* 10 only */
SDEB_I_COMP_WRITE = 29,
- SDEB_I_LAST_ELEMENT = 30, /* keep this last */
+ SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
};
@@ -372,12 +383,12 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
+ 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
SDEB_I_MAINT_OUT, 0, 0, 0,
- SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
- 0, 0, 0, 0,
+ SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
+ 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
@@ -396,6 +407,7 @@ static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -409,72 +421,81 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
-static const struct opcode_info_t msense_iarr[1] = {
+/*
+ * The following are overflow arrays for cdbs that "hit" the same index in
+ * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
+ * should be placed in opcode_info_arr[], the others should be placed here.
+ */
+static const struct opcode_info_t msense_iarr[] = {
{0, 0x1a, 0, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t mselect_iarr[1] = {
+static const struct opcode_info_t mselect_iarr[] = {
{0, 0x15, 0, F_D_OUT, NULL, NULL,
{6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t read_iarr[3] = {
- {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+static const struct opcode_info_t read_iarr[] = {
+ {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
+ {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
{6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
- {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
+ {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
+ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t write_iarr[3] = {
- {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
- {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
- 0, 0, 0, 0} },
- {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
- {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
- {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
- 0xc7, 0, 0, 0, 0} },
+static const struct opcode_info_t write_iarr[] = {
+ {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
+ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+ {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
+ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
+ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t sa_in_iarr[1] = {
+static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0, 0xc7} },
+ 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
};
-static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
- {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
- NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
+static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
+ {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
+ {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
+ 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};
-static const struct opcode_info_t maint_in_iarr[2] = {
+static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
{12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
- 0xc7, 0, 0, 0, 0} },
+ 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
{12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
-static const struct opcode_info_t write_same_iarr[1] = {
- {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
+static const struct opcode_info_t write_same_iarr[] = {
+ {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1f, 0xc7} },
+ 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};
-static const struct opcode_info_t reserve_iarr[1] = {
- {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+static const struct opcode_info_t reserve_iarr[] = {
+ {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
{6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t release_iarr[1] = {
- {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+static const struct opcode_info_t release_iarr[] = {
+ {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
{6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
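Dropping the explicit array bounds ([1], [3]) in favour of [] pays off in the next hunk: opcode_info_arr can record each overflow array's length with ARRAY_SIZE() instead of a hand-maintained count, so adding an entry to an *_iarr can no longer drift out of sync with the table that references it. The idiom, reduced to a sketch with hypothetical names:

        struct demo_entry {
                u8 opcode;
        };

        /* Unsized: the compiler derives the element count from the data... */
        static const struct demo_entry demo_iarr[] = {
                { 0x28 },
                { 0xa8 },
        };

        /* ...so the primary table's count field can be written as
         * ARRAY_SIZE(demo_iarr) and stays correct as entries come and go. */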
@@ -484,57 +505,67 @@ static const struct opcode_info_t release_iarr[1] = {
* REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
+ {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
{6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
{12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT LUNS */
{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
{6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
- {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
- 0} },
- {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
- {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
+/* 5 */
+ {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
+ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
+ 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
+ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
+ 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
{10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0, 0, 0} },
- {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
+ {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
{10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0, 0} },
- {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
- {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
+ {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
+ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
- {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
- {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
+ {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
+ resp_write_dt0, write_iarr, /* WRITE(16) */
+ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* WRITE(16) */
{0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
- {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
- {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
- 0} },
+ {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
+ resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
+ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
+ {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
+ {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
+ resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
+ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0, 0xc7, 0, 0, 0, 0} },
+/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
- vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
- 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
- {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
+ {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
+ resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
+ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
+ 0xff, 0xff} },
+ {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
+ NULL, reserve_iarr, /* RESERVE(10) <no response function> */
{10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
- {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
+ {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
+ NULL, release_iarr, /* RELEASE(10) <no response function> */
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
@@ -546,23 +577,25 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
{6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
- {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
- NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
+ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+/* 25 */
+ {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
+ NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
- {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
- write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
- 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
- {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
+ resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
+ 0, 0, 0, 0, 0} },
+ {0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
+ {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
+ {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
- 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
+ 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
/* 30 */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
@@ -571,6 +604,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
+static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
@@ -797,6 +831,61 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
/* return -ENOTTY; // correct return but upsets fdisk */
}
+static void config_cdb_len(struct scsi_device *sdev)
+{
+ switch (sdebug_cdb_len) {
+ case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = true;
+ break;
+ case 16:
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ case 32: /* No knobs to suggest this so same as 16 for now */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ default:
+ pr_warn("unexpected cdb_len=%d, force to 10\n",
+ sdebug_cdb_len);
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ sdebug_cdb_len = 10;
+ break;
+ }
+}
+
+static void all_config_cdb_len(void)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ shost = sdbg_host->shost;
+ shost_for_each_device(sdev, shost) {
+ config_cdb_len(sdev);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
+
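config_cdb_len() can only hint: use_10_for_rw, use_16_for_rw and use_10_for_ms are advisory bit-fields on struct scsi_device that upper layers consult when sizing READ/WRITE and MODE SENSE/SELECT cdbs. A compressed sketch of how a consumer might act on those hints (illustrative only; the real selection in sd.c also weighs the LBA and transfer length):

        /* Illustrative: pick a READ opcode from the hints that
         * config_cdb_len() sets. */
        static u8 pick_read_opcode(const struct scsi_device *sdev)
        {
                if (sdev->use_16_for_rw)
                        return READ_16;
                if (sdev->use_10_for_rw)
                        return READ_10;
                return READ_6;
        }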
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
struct sdebug_host_info *sdhp;
@@ -955,7 +1044,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static char sdebug_inq_vendor_id[9] = "Linux ";
static char sdebug_inq_product_id[17] = "scsi_debug ";
-static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
+static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -1411,6 +1500,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memcpy(&arr[8], sdebug_inq_vendor_id, 8);
memcpy(&arr[16], sdebug_inq_product_id, 16);
memcpy(&arr[32], sdebug_inq_product_rev, 4);
+ /* Use Vendor Specific area to place driver date in ASCII */
+ memcpy(&arr[36], sdebug_version_date, 8);
/* version descriptors (2 bytes each) follow */
put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
@@ -1900,7 +1991,7 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
+ 0, 0, 0, 0};
unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
@@ -2077,13 +2168,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
- case 0x3: /* Format device page, direct access */
+ case 0x3: /* Format device page, direct access */
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
} else
bad_pcode = true;
- break;
+ break;
case 0x8: /* Caching page, direct access */
if (is_disk) {
len = resp_caching_pg(ap, pcontrol, target);
@@ -2099,7 +2190,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
if ((subpcode > 0x2) && (subpcode < 0xff)) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2136,7 +2227,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
break;
default:
bad_pcode = true;
@@ -2172,8 +2263,8 @@ static int resp_mode_select(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
return check_condition_result;
}
- res = fetch_to_dev_buffer(scp, arr, param_len);
- if (-1 == res)
+ res = fetch_to_dev_buffer(scp, arr, param_len);
+ if (-1 == res)
return DID_ERROR << 16;
else if (sdebug_verbose && (res < param_len))
sdev_printk(KERN_INFO, scp->device,
@@ -2239,8 +2330,8 @@ static int resp_temp_l_pg(unsigned char * arr)
0x0, 0x1, 0x3, 0x2, 0x0, 65,
};
- memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
- return sizeof(temp_l_pg);
+ memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
+ return sizeof(temp_l_pg);
}
static int resp_ie_l_pg(unsigned char * arr)
@@ -2248,18 +2339,18 @@ static int resp_ie_l_pg(unsigned char * arr)
unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
};
- memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
+ memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
arr[4] = THRESHOLD_EXCEEDED;
arr[5] = 0xff;
}
- return sizeof(ie_l_pg);
+ return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512
-static int resp_log_sense(struct scsi_cmnd * scp,
- struct sdebug_dev_info * devip)
+static int resp_log_sense(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
int ppc, sp, pcode, subpcode, alloc_len, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
@@ -2353,8 +2444,8 @@ static int check_device_access_params(struct scsi_cmnd *scp,
}
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
- bool do_write)
+static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
+ u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
@@ -2380,14 +2471,15 @@ static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep + (block * sdebug_sector_size),
- (num - rest) * sdebug_sector_size, 0, do_write);
+ (num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep, rest * sdebug_sector_size,
- (num - rest) * sdebug_sector_size, do_write);
+ sg_skip + ((num - rest) * sdebug_sector_size),
+ do_write);
}
return ret;
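The new sg_skip argument is forwarded to sg_copy_buffer() as its skip parameter: the number of bytes to advance into the command's scatter-gather list before copying. Previously hard-wired to 0 here, it lets resp_write_scat() (below) place each LBA range descriptor's payload at its own byte offset within a single data-out buffer. A sketch of the call with illustrative names:

        /* Copy 'len' payload bytes that sit 'skip' bytes into the data-out
         * sg list over to device storage; 'true' selects sg -> buffer. */
        static size_t copy_one_range(struct scsi_data_buffer *sdb, u8 *store,
                                     size_t len, off_t skip)
        {
                return sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
                                      store, len, skip, true);
        }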
@@ -2648,7 +2740,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, false);
+ ret = do_device_access(scp, 0, lba, num, false);
read_unlock_irqrestore(&atomic_rw, iflags);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2936,7 +3028,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, true);
+ ret = do_device_access(scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2970,6 +3062,173 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
+/*
+ * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
+ * No READ GATHERED yet (requires bidi or long cdb holding gather list).
+ */
+static int resp_write_scat(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u8 *lrdp = NULL;
+ u8 *up;
+ u8 wrprotect;
+ u16 lbdof, num_lrd, k;
+ u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
+ u32 lb_size = sdebug_sector_size;
+ u32 ei_lba;
+ u64 lba;
+ unsigned long iflags;
+ int ret, res;
+ bool is_16;
+ static const u32 lrd_size = 32; /* + parameter list header size */
+
+ if (cmd[0] == VARIABLE_LENGTH_CMD) {
+ is_16 = false;
+ wrprotect = (cmd[10] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 12);
+ num_lrd = get_unaligned_be16(cmd + 16);
+ bt_len = get_unaligned_be32(cmd + 28);
+ } else { /* that leaves WRITE SCATTERED(16) */
+ is_16 = true;
+ wrprotect = (cmd[2] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 4);
+ num_lrd = get_unaligned_be16(cmd + 8);
+ bt_len = get_unaligned_be32(cmd + 10);
+ if (unlikely(have_dif_prot)) {
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
+ wrprotect) {
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
+ wrprotect == 0)
+ sdev_printk(KERN_ERR, scp->device,
+ "Unprotected WR to DIF device\n");
+ }
+ }
+ if ((num_lrd == 0) || (bt_len == 0))
+ return 0; /* T10 says these do-nothings are not errors */
+ if (lbdof == 0) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LB Data Offset field bad\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lbdof_blen = lbdof * lb_size;
+ if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LBA range descriptors don't fit\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
+ if (lrdp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
+ my_name, __func__, lbdof_blen);
+ res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
+ if (res == -1) {
+ ret = DID_ERROR << 16;
+ goto err_out;
+ }
+
+ write_lock_irqsave(&atomic_rw, iflags);
+ sg_off = lbdof_blen;
+ /* Spec says Buffer xfer Length field in number of LBs in dout */
+ cum_lb = 0;
+ for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
+ lba = get_unaligned_be64(up + 0);
+ num = get_unaligned_be32(up + 8);
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
+ my_name, __func__, k, lba, num, sg_off);
+ if (num == 0)
+ continue;
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+ goto err_out_unlock;
+ num_by = num * lb_size;
+ ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
+
+ if ((cum_lb + num) > bt_len) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: sum of blocks > data provided\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
+ 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+
+ /* DIX + T10 DIF */
+ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
+ int prot_ret = prot_verify_write(scp, lba, num,
+ ei_lba);
+
+ if (prot_ret) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
+ prot_ret);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+
+ ret = do_device_access(scp, sg_off, lba, num, true);
+ if (unlikely(scsi_debug_lbp()))
+ map_region(lba, num);
+ if (unlikely(-1 == ret)) {
+ ret = DID_ERROR << 16;
+ goto err_out_unlock;
+ } else if (unlikely(sdebug_verbose && (ret < num_by)))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, num_by, ret);
+
+ if (unlikely(sdebug_any_injecting_opt)) {
+ struct sdebug_queued_cmd *sqcp =
+ (struct sdebug_queued_cmd *)scp->host_scribble;
+
+ if (sqcp) {
+ if (sqcp->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+ }
+ sg_off += num_by;
+ cum_lb += num;
+ }
+ ret = 0;
+err_out_unlock:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+err_out:
+ kfree(lrdp);
+ return ret;
+}
+
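resp_write_scat() expects the data-out buffer to begin with lbdof logical blocks holding a 32-byte header followed by num_lrd 32-byte LBA range descriptors; the write payloads are then packed after that region in descriptor order (sg_off starts at lbdof_blen and advances by each range's byte count). Each descriptor carries a big-endian 8-byte LBA at offset 0 and a 4-byte block count at offset 8; in the 32-byte-cdb form, bytes 12..15 hold the expected initial LBA for protection information. A sketch of filling one descriptor under those assumptions:

        /* Illustrative builder for one LBA range descriptor as parsed above. */
        static void fill_lrd(u8 *desc, u64 lba, u32 num_blocks, u32 ei_lba)
        {
                memset(desc, 0, 32);                      /* reserved bytes zero */
                put_unaligned_be64(lba, desc + 0);        /* bytes 0..7 */
                put_unaligned_be32(num_blocks, desc + 8); /* bytes 8..11 */
                put_unaligned_be32(ei_lba, desc + 12);    /* 32-byte cdb form only */
        }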
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
@@ -3177,7 +3436,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
* from data-in into arr. Safe (atomic) since write_lock held. */
fake_storep_hold = fake_storep;
fake_storep = arr;
- ret = do_device_access(scp, 0, dnum, true);
+ ret = do_device_access(scp, 0, 0, dnum, true);
fake_storep = fake_storep_hold;
if (ret == -1) {
retval = DID_ERROR << 16;
@@ -3495,6 +3754,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3603,12 +3863,12 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
if (!sdbg_host) {
pr_err("Host info NULL\n");
return NULL;
- }
+ }
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
- (devip->target == sdev->id) &&
- (devip->lun == sdev->lun))
- return devip;
+ (devip->target == sdev->id) &&
+ (devip->lun == sdev->lun))
+ return devip;
else {
if ((!devip->used) && (!open_devip))
open_devip = devip;
@@ -3660,6 +3920,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
blk_queue_max_segment_size(sdp->request_queue, -1U);
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
+ config_cdb_len(sdp);
return 0;
}
@@ -3678,13 +3939,14 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp)
+static void stop_qc_helper(struct sdebug_defer *sd_dp,
+ enum sdeb_defer_type defer_t)
{
if (!sd_dp)
return;
- if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+ if (defer_t == SDEB_DEFER_HRT)
hrtimer_cancel(&sd_dp->hrt);
- else if (sdebug_jdelay < 0)
+ else if (defer_t == SDEB_DEFER_WQ)
cancel_work_sync(&sd_dp->ew.work);
}
@@ -3694,6 +3956,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
unsigned long iflags;
int j, k, qmax, r_qmax;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3717,8 +3980,13 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
return true;
}
@@ -3733,6 +4001,7 @@ static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3751,8 +4020,13 @@ static void stop_all_queued(void)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -3848,8 +4122,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *devip;
- struct scsi_device * sdp;
- struct Scsi_Host * hp;
+ struct scsi_device *sdp;
+ struct Scsi_Host *hp;
int k = 0;
++num_bus_resets;
@@ -3863,7 +4137,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
if (sdbg_host) {
list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
+ &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
@@ -3886,15 +4160,15 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
++num_host_resets;
if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
}
- }
- spin_unlock(&sdebug_host_list_lock);
+ }
+ spin_unlock(&sdebug_host_list_lock);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -3921,7 +4195,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
- starts[0] = sdebug_sectors_per;
+ starts[0] = sdebug_sectors_per;
for (k = 1; k < sdebug_num_parts; ++k)
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
@@ -3995,6 +4269,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
+ sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4003,7 +4278,7 @@ static void setup_inject(struct sdebug_queue *sqp,
* SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
- int scsi_result, int delta_jiff)
+ int scsi_result, int delta_jiff, int ndelay)
{
unsigned long iflags;
int k, num_in_q, qdepth, inject;
@@ -4081,20 +4356,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
setup_inject(sqp, sqcp);
- if (delta_jiff > 0 || sdebug_ndelay > 0) {
+ if (sd_dp == NULL) {
+ sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+ if (sd_dp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (delta_jiff > 0 || ndelay > 0) {
ktime_t kt;
if (delta_jiff > 0) {
- struct timespec ts;
-
- jiffies_to_timespec(delta_jiff, &ts);
- kt = ktime_set(ts.tv_sec, ts.tv_nsec);
+ kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
} else
- kt = sdebug_ndelay;
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ kt = ndelay;
+ if (!sd_dp->init_hrt) {
+ sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -4104,12 +4379,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_HRT;
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (!sd_dp->init_wq) {
+ sd_dp->init_wq = true;
sqcp->sd_dp = sd_dp;
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
@@ -4117,6 +4391,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_WQ;
schedule_work(&sd_dp->ew.work);
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
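The timespec round-trip (jiffies_to_timespec() plus ktime_set()) is replaced by direct arithmetic: one jiffy is NSEC_PER_SEC / HZ nanoseconds, so with HZ=250 a delta_jiff of 5 becomes 5 * (1000000000 / 250) = 20000000 ns = 20 ms. The u64 cast keeps the multiply from overflowing on 32-bit builds:

        /* e.g. HZ=250, delta_jiff=5: 5 * (1000000000 / 250) ns = 20 ms */
        kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));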
@@ -4141,6 +4416,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
*/
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
+module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
@@ -4198,6 +4474,7 @@ MODULE_VERSION(SDEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
@@ -4210,7 +4487,8 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
-MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
+MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
+ SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
@@ -4360,9 +4638,6 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
@@ -4403,9 +4678,6 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
@@ -4426,15 +4698,15 @@ static ssize_t opts_show(struct device_driver *ddp, char *buf)
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int opts;
+ int opts;
char work[20];
- if (1 == sscanf(buf, "%10s", work)) {
- if (0 == strncasecmp(work,"0x", 2)) {
- if (1 == sscanf(&work[2], "%x", &opts))
+ if (sscanf(buf, "%10s", work) == 1) {
+ if (strncasecmp(work, "0x", 2) == 0) {
+ if (kstrtoint(work + 2, 16, &opts) == 0)
goto opts_done;
} else {
- if (1 == sscanf(work, "%d", &opts))
+ if (kstrtoint(work, 10, &opts) == 0)
goto opts_done;
}
}
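kstrtoint() is stricter than the sscanf() it replaces: it fails on trailing garbage and on overflow, both of which sscanf() silently ignores. As a side note (a suggestion, not what this patch does), passing base 0 would let a single call auto-detect the 0x prefix and collapse the two branches:

        /* Hypothetical tighter variant: base 0 accepts both "31" and
         * "0x1f", and still rejects trailing junk and overflow. */
        if (kstrtoint(work, 0, &opts) == 0)
                goto opts_done;
        return -EINVAL;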
@@ -4455,7 +4727,7 @@ static ssize_t ptype_show(struct device_driver *ddp, char *buf)
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_ptype = n;
@@ -4472,7 +4744,7 @@ static ssize_t dsense_show(struct device_driver *ddp, char *buf)
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_dsense = n;
@@ -4489,7 +4761,7 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
n = (n > 0);
@@ -4522,7 +4794,7 @@ static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_no_lun_0 = n;
@@ -4539,7 +4811,7 @@ static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_num_tgts = n;
@@ -4569,7 +4841,7 @@ static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int nth;
+ int nth;
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
sdebug_every_nth = nth;
@@ -4591,7 +4863,7 @@ static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4678,7 +4950,7 @@ static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4884,6 +5156,24 @@ static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(uuid_ctl);
+static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
+}
+static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int ret, n;
+
+ ret = kstrtoint(buf, 0, &n);
+ if (ret)
+ return ret;
+ sdebug_cdb_len = n;
+ all_config_cdb_len();
+ return count;
+}
+static DRIVER_ATTR_RW(cdb_len);
+
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -4923,6 +5213,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
+ &driver_attr_cdb_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5113,12 +5404,12 @@ static int __init scsi_debug_init(void)
host_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
+ for (k = 0; k < host_to_add; k++) {
+ if (sdebug_add_adapter()) {
pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
- }
- }
+ break;
+ }
+ }
if (sdebug_verbose)
pr_info("built %d host(s)\n", sdebug_add_host);
@@ -5161,53 +5452,53 @@ module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
sdbg_host = to_sdebug_host(dev);
- kfree(sdbg_host);
+ kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
int k, devs_per_host;
- int error = 0;
- struct sdebug_host_info *sdbg_host;
+ int error = 0;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
- sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
- if (NULL == sdbg_host) {
+ sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
+ if (sdbg_host == NULL) {
pr_err("out of memory at line %d\n", __LINE__);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&sdbg_host->dev_info_list);
+ INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
- for (k = 0; k < devs_per_host; k++) {
+ for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ error = -ENOMEM;
goto clean;
- }
- }
+ }
+ }
- spin_lock(&sdebug_host_list_lock);
- list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ spin_lock(&sdebug_host_list_lock);
+ list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
+ spin_unlock(&sdebug_host_list_lock);
- sdbg_host->dev.bus = &pseudo_lld_bus;
- sdbg_host->dev.parent = pseudo_primary;
- sdbg_host->dev.release = &sdebug_release_adapter;
+ sdbg_host->dev.bus = &pseudo_lld_bus;
+ sdbg_host->dev.parent = pseudo_primary;
+ sdbg_host->dev.release = &sdebug_release_adapter;
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
- error = device_register(&sdbg_host->dev);
+ error = device_register(&sdbg_host->dev);
- if (error)
+ if (error)
goto clean;
++sdebug_add_host;
- return error;
+ return error;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
@@ -5217,20 +5508,20 @@ clean:
}
kfree(sdbg_host);
- return error;
+ return error;
}
static void sdebug_remove_adapter(void)
{
- struct sdebug_host_info * sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host = NULL;
- spin_lock(&sdebug_host_list_lock);
- if (!list_empty(&sdebug_host_list)) {
- sdbg_host = list_entry(sdebug_host_list.prev,
- struct sdebug_host_info, host_list);
+ spin_lock(&sdebug_host_list_lock);
+ if (!list_empty(&sdebug_host_list)) {
+ sdbg_host = list_entry(sdebug_host_list.prev,
+ struct sdebug_host_info, host_list);
list_del(&sdbg_host->host_list);
}
- spin_unlock(&sdebug_host_list_lock);
+ spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
@@ -5281,6 +5572,12 @@ static bool fake_timeout(struct scsi_cmnd *scp)
return false;
}
+static bool fake_host_busy(struct scsi_cmnd *scp)
+{
+ return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
+ (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
+}
+
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -5323,6 +5620,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
b);
}
+ if (fake_host_busy(scp))
+ return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
goto err_out;
@@ -5420,12 +5719,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
errsts = r_pfp(scp, devip);
fini:
- return schedule_resp(scp, devip, errsts,
- ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
+ if (F_DELAY_OVERR & flags)
+ return schedule_resp(scp, devip, errsts, 0, 0);
+ else
+ return schedule_resp(scp, devip, errsts, sdebug_jdelay,
+ sdebug_ndelay);
check_cond:
- return schedule_resp(scp, devip, check_condition_result, 0);
+ return schedule_resp(scp, devip, check_condition_result, 0, 0);
err_out:
- return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
+ return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
@@ -5484,7 +5786,7 @@ static int sdebug_driver_probe(struct device * dev)
if (sdebug_mq_active)
hpnt->nr_hw_queues = submit_queues;
- sdbg_host->shost = hpnt;
+ sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
hpnt->max_id = sdebug_num_tgts + 1;
@@ -5542,12 +5844,12 @@ static int sdebug_driver_probe(struct device * dev)
sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
if (sdebug_every_nth) /* need stats counters for every_nth */
sdebug_statistics = true;
- error = scsi_add_host(hpnt, &sdbg_host->dev);
- if (error) {
+ error = scsi_add_host(hpnt, &sdbg_host->dev);
+ if (error) {
pr_err("scsi_add_host failed\n");
- error = -ENODEV;
+ error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else
scsi_scan_host(hpnt);
return error;
@@ -5555,7 +5857,7 @@ static int sdebug_driver_probe(struct device * dev)
static int sdebug_driver_remove(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = to_sdebug_host(dev);
@@ -5565,16 +5867,16 @@ static int sdebug_driver_remove(struct device * dev)
return -ENODEV;
}
- scsi_remove_host(sdbg_host->shost);
+ scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
- list_del(&sdbg_devinfo->dev_list);
- kfree(sdbg_devinfo);
- }
+ list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo);
+ }
- scsi_host_put(sdbg_host->shost);
- return 0;
+ scsi_host_put(sdbg_host->shost);
+ return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dfb8da83fa50..f3b117246d47 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -108,8 +108,8 @@ static struct {
* seagate controller, which causes SCSI code to reset bus.
*/
{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
- {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
- {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+ {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */
+ {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */
{"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
{"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
{"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
@@ -119,7 +119,7 @@ static struct {
{"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
{"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
{"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
- {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN},
{"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
@@ -158,8 +158,8 @@ static struct {
{"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
{"DELL", "PV530F", NULL, BLIST_SPARSELUN},
{"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
- {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
- {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
+ {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
+ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
@@ -181,15 +181,14 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
- {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "DF400", "*", BLIST_REPORTLUN2},
+ {"HP", "DF500", "*", BLIST_REPORTLUN2},
{"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -255,7 +254,6 @@ static struct {
{"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
{"SUN", "T300", "*", BLIST_SPARSELUN},
{"SUN", "T4", "*", BLIST_SPARSELUN},
- {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
@@ -353,7 +351,8 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, blist_flags_t flags, int key)
+ char *strflags, blist_flags_t flags,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -402,7 +401,7 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
- const char *model, int key)
+ const char *model, enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -485,7 +484,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
*
* Returns: 0 OK, -error on failure.
**/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *found;
@@ -587,20 +587,15 @@ blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key)
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
- int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
- err = PTR_ERR(devinfo);
- if (err != -ENOENT)
- return err;
-
- /* nothing found, return nothing */
+ /* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
@@ -774,7 +769,7 @@ void scsi_exit_devinfo(void)
* Adds the requested list, returns zero on success, -EEXIST if the
* key is already registered to a list, or other error on failure.
*/
-int scsi_dev_info_add_list(int key, const char *name)
+int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name)
{
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
@@ -806,7 +801,7 @@ EXPORT_SYMBOL(scsi_dev_info_add_list);
* frees the list itself. Returns 0 on success or -EINVAL if the key
* can't be found.
*/
-int scsi_dev_info_remove_list(int key)
+int scsi_dev_info_remove_list(enum scsi_devinfo_key key)
{
struct list_head *lh, *lh_next;
struct scsi_dev_info_list_table *devinfo_table =
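A note on the typed-key conversion running through this file: replacing the bare int key with enum scsi_devinfo_key makes the keyspace visible in every prototype and lets the compiler flag mismatched callers. A minimal sketch of the idea, with hypothetical demo_* names:

enum demo_devinfo_key {
    DEMO_DEVINFO_GLOBAL = 0,
    DEMO_DEVINFO_SPI,
};

/* With a plain int any integer compiles silently; with the enum the intent
 * is explicit at each call site and enum-conversion warnings can catch
 * unrelated values being passed as a key. */
int demo_list_del_keyed(const char *vendor, const char *model,
                        enum demo_devinfo_key key);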
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 2b785d09d5bd..b88b5dbbc444 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
+ {"IBM", "3542", "rdac", },
+ {"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
- {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
+ {"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 62b56de38ae8..d042915ce895 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,9 +61,10 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
struct scsi_cmnd *);
-/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
+ lockdep_assert_held(shost->host_lock);
+
if (atomic_read(&shost->host_busy) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
@@ -220,6 +221,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
}
}
+static void scsi_eh_inc_host_failed(struct rcu_head *head)
+{
+ struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+ scsi_eh_wakeup(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
scsi_eh_reset(scmd);
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- shost->host_failed++;
- scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * Ensure that all tasks observe the host state change before the
+ * host_failed change.
+ */
+ call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
}
/**
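The hunk above replaces a "called with shost->host_lock held" comment with lockdep_assert_held(), turning the locking rule into a runtime check. A kernel-style sketch of the pattern, with hypothetical demo_* names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_host {
    spinlock_t lock;
};

static void demo_eh_wakeup(struct demo_host *h)
{
    /* Splats under CONFIG_LOCKDEP if a caller forgot to take the lock. */
    lockdep_assert_held(&h->lock);
    /* ... wake the error handler ... */
}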
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d9ca1dfab154..976c936029cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -164,7 +164,7 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
* for a requeue after completion, which should only occur in this
* file.
*/
-static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
@@ -220,7 +220,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
- __scsi_queue_insert(cmd, reason, 1);
+ __scsi_queue_insert(cmd, reason, true);
}
@@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+/*
+ * Decrement the host_busy counter and wake up the error handler if necessary.
+ * To avoid the case where the error handler is not woken up even though
+ * shost->host_busy == shost->host_failed: use call_rcu() in
+ * scsi_eh_scmd_add() in combination with an RCU read lock in this function
+ * to ensure that this function either finishes in its entirety before
+ * scsi_eh_scmd_add() increases the host_failed counter, or that it notices
+ * the shost state change made by scsi_eh_scmd_add().
+ */
+static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
- struct Scsi_Host *shost = sdev->host;
- struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
+ rcu_read_lock();
atomic_dec(&shost->host_busy);
- if (starget->can_queue > 0)
- atomic_dec(&starget->target_busy);
-
- if (unlikely(scsi_host_in_recovery(shost) &&
- (shost->host_failed || shost->host_eh_scheduled))) {
+ if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
- scsi_eh_wakeup(shost);
+ if (shost->host_failed || shost->host_eh_scheduled)
+ scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
+ rcu_read_unlock();
+}
+
+void scsi_device_unbusy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
+
+ scsi_dec_host_busy(shost);
+
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
atomic_dec(&sdev->device_busy);
}
@@ -998,11 +1015,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
break;
case ACTION_RETRY:
/* Retry the same command immediately */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
break;
case ACTION_DELAYED_RETRY:
/* Retry the same command after a delay */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
break;
}
}
@@ -1128,7 +1145,7 @@ EXPORT_SYMBOL(scsi_init_io);
* Called from inside blk_get_request() for pass-through requests and from
* inside scsi_init_command() for filesystem requests.
*/
-void scsi_initialize_rq(struct request *rq)
+static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1136,7 +1153,6 @@ void scsi_initialize_rq(struct request *rq)
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
-EXPORT_SYMBOL(scsi_initialize_rq);
/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
@@ -1532,7 +1548,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
return 0;
}
@@ -2020,7 +2036,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
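Taken together, the scsi_error.c hunk (scsi_eh_inc_host_failed() deferred via call_rcu()) and the scsi_dec_host_busy() read-side section above form one ordering scheme, as the comment in the hunk describes. A kernel-style sketch of that scheme with hypothetical demo_* names, not the actual mid-layer code:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_host {
    spinlock_t lock;
    struct rcu_head rcu;
    unsigned long state;            /* bit DEMO_RECOVERY marks recovery */
    atomic_t busy;
    unsigned int failed;
};
#define DEMO_RECOVERY 0

static void demo_wakeup_if_quiesced(struct demo_host *h);

/* Producer: publish the state change first; bump the counter only after an
 * RCU grace period, i.e. after every demo_dec_busy() that may have missed
 * the state change has finished. */
static void demo_inc_failed_rcu(struct rcu_head *head)
{
    struct demo_host *h = container_of(head, struct demo_host, rcu);
    unsigned long flags;

    spin_lock_irqsave(&h->lock, flags);
    h->failed++;
    demo_wakeup_if_quiesced(h);
    spin_unlock_irqrestore(&h->lock, flags);
}

static void demo_mark_failed(struct demo_host *h)
{
    set_bit(DEMO_RECOVERY, &h->state);
    call_rcu(&h->rcu, demo_inc_failed_rcu);
}

/* Consumer: the read-side section keeps the decrement and the recovery
 * check inside one RCU critical section, pairing with the grace period. */
static void demo_dec_busy(struct demo_host *h)
{
    rcu_read_lock();
    atomic_dec(&h->busy);
    if (test_bit(DEMO_RECOVERY, &h->state))
        demo_wakeup_if_quiesced(h);
    rcu_read_unlock();
}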
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a5946cd64caa..99f1db5e467e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
/* scsi_devinfo.c */
/* list of keys for the lists */
-enum {
+enum scsi_devinfo_key {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
};
@@ -56,13 +56,15 @@ extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key);
+ enum scsi_devinfo_key key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- blist_flags_t flags, int key);
-extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
-extern int scsi_dev_info_add_list(int key, const char *name);
-extern int scsi_dev_info_remove_list(int key);
+ blist_flags_t flags,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
+extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
@@ -184,7 +186,6 @@ void scsi_dh_release_device(struct scsi_device *sdev);
static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
-static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
/*
* internal scsi timeout functions: for use by mid-layer and transport
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 26ce17178401..91b90f672d23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1278,7 +1278,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add device: %d\n", error);
- scsi_dh_remove_device(sdev);
return error;
}
@@ -1287,7 +1286,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add class device: %d\n", error);
- scsi_dh_remove_device(sdev);
device_del(&sdev->sdev_gendev);
return error;
}
@@ -1354,7 +1352,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
bsg_unregister_queue(sdev->request_queue);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
- scsi_dh_remove_device(sdev);
device_del(dev);
} else
put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4664024bd5d3..be3be0f9cb2d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,8 +267,8 @@ static const struct {
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
- { FC_PORTSPEED_64BIT, "64 Gbit" },
- { FC_PORTSPEED_128BIT, "128 Gbit" },
+ { FC_PORTSPEED_64GBIT, "64 Gbit" },
+ { FC_PORTSPEED_128GBIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
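The FC_PORTSPEED_64BIT/128BIT lines replaced above referenced enumerators that do not exist; the table works because each speed flag is a distinct bit that the generated search walks. A small runnable sketch of that table walk (demo_* names are illustrative):

#include <stdio.h>

#define DEMO_PORTSPEED_64GBIT  (1u << 0)
#define DEMO_PORTSPEED_128GBIT (1u << 1)

static const struct {
    unsigned int value;
    const char *name;
} demo_speed_names[] = {
    { DEMO_PORTSPEED_64GBIT,  "64 Gbit" },
    { DEMO_PORTSPEED_128GBIT, "128 Gbit" },
};

static const char *demo_speed_name(unsigned int speed)
{
    size_t i;

    for (i = 0; i < sizeof(demo_speed_names) / sizeof(demo_speed_names[0]); i++)
        if (demo_speed_names[i].value & speed)
            return demo_speed_names[i].name;
    return "unknown";
}

int main(void)
{
    printf("%s\n", demo_speed_name(DEMO_PORTSPEED_128GBIT)); /* 128 Gbit */
    return 0;
}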
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 10ebb213ddb3..871ea582029e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -1009,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev)
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ /*
+ * Because this function and the power management code both call
+ * scsi_device_quiesce(), it is not safe to perform domain validation
+ * while suspend or resume is in progress. Hence the
+ * lock/unlock_system_sleep() calls.
+ */
+ lock_system_sleep();
+
if (unlikely(spi_dv_in_progress(starget)))
- return;
+ goto unlock;
if (unlikely(scsi_device_get(sdev)))
- return;
+ goto unlock;
+
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
@@ -1049,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev)
out_put:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
+unlock:
+ unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
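The hunk above brackets domain validation with lock_system_sleep()/unlock_system_sleep() and reroutes the early returns through a label so the lock is always dropped. A kernel-style sketch of the shape, with hypothetical demo_* helpers standing in for the driver specifics:

#include <linux/suspend.h>

struct demo_dev;
static bool demo_dv_in_progress(struct demo_dev *dev);
static int demo_get_device(struct demo_dev *dev);
static void demo_put_device(struct demo_dev *dev);
static void demo_do_validation(struct demo_dev *dev);

static void demo_domain_validation(struct demo_dev *dev)
{
    /* Held for the whole operation: validation and system suspend both
     * quiesce the device and must not run concurrently. */
    lock_system_sleep();

    if (demo_dv_in_progress(dev))
        goto unlock;
    if (demo_get_device(dev))
        goto unlock;

    demo_do_validation(dev);
    demo_put_device(dev);
unlock:
    unlock_system_sleep();
}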
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a028ab3322a9..ce756d575aff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2172,7 +2172,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
/* Wait 1 second for next try */
msleep(1000);
- printk(".");
+ printk(KERN_CONT ".");
/*
* Wait for USB flash devices with slow firmware.
@@ -2202,9 +2202,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
if (spintime) {
if (scsi_status_is_good(the_result))
- printk("ready\n");
+ printk(KERN_CONT "ready\n");
else
- printk("not responding...\n");
+ printk(KERN_CONT "not responding...\n");
}
}
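The KERN_CONT additions above matter because a printk() without it starts a new log record, splitting the intended one-line ".....ready" spin-up display. A kernel-style sketch of the continuation pattern:

#include <linux/delay.h>
#include <linux/printk.h>

static void demo_spinup_progress(int tries)
{
    int i;

    printk(KERN_NOTICE "demo: waiting for spin-up");
    for (i = 0; i < tries; i++) {
        msleep(1000);
        printk(KERN_CONT ".");          /* continues the same record */
    }
    printk(KERN_CONT "ready\n");
}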
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 11826c5c2dd4..62f04c0511cf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
}
static void ses_match_to_enclosure(struct enclosure_device *edev,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ int refresh)
{
+ struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent);
struct efd efd = {
.addr = 0,
};
- ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+ if (refresh)
+ ses_enclosure_data_process(edev, edev_sdev, 0);
if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
@@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev,
struct enclosure_device *prev = NULL;
while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
- ses_match_to_enclosure(edev, sdev);
+ ses_match_to_enclosure(edev, sdev, 1);
prev = edev;
}
return -ENODEV;
@@ -768,7 +771,7 @@ page2_not_supported:
shost_for_each_device(tmp_sdev, sdev->host) {
if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
continue;
- ses_match_to_enclosure(edev, tmp_sdev);
+ ses_match_to_enclosure(edev, tmp_sdev, 0);
}
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f098877eed4a..0c434453aab3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1140,10 +1140,10 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon
}
#endif
-static unsigned int
+static __poll_t
sg_poll(struct file *filp, poll_table * wait)
{
- unsigned int res = 0;
+ __poll_t res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
@@ -1174,7 +1174,7 @@ sg_poll(struct file *filp, poll_table * wait)
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
- "sg_poll: res=0x%x\n", (int) res));
+ "sg_poll: res=0x%x\n", (__force u32) res));
return res;
}
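sg_poll() above is one of this series' many unsigned int to __poll_t conversions; __poll_t is a sparse "bitwise" type, so mixing it with plain integers is flagged, and printing it takes a __force cast as in the hunk. A kernel-style sketch against a hypothetical demo device:

#include <linux/poll.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static bool demo_data_ready;

static __poll_t demo_poll(struct file *filp, poll_table *wait)
{
    __poll_t mask = 0;

    poll_wait(filp, &demo_waitq, wait);
    if (demo_data_ready)
        mask |= POLLIN | POLLRDNORM;            /* readable */
    pr_debug("demo_poll: mask=0x%x\n", (__force u32)mask);
    return mask;
}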
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index 0f42a225a664..e6b779930230 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I.
-obj-m += smartpqi.o
+obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b141d7641a2e..6c399480783d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4712,7 +4712,7 @@ static ssize_t read_byte_cnt_show(struct device *dev,
static DEVICE_ATTR_RO(read_byte_cnt);
/**
- * read_us_show - return read us - overall time spent waiting on reads in ns.
+ * read_ns_show - return read ns - overall time spent waiting on reads in ns.
* @dev: struct device
* @attr: attribute structure
* @buf: buffer to return formatted data in
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3b3d1d050cac..40fc7a590e81 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1834,8 +1834,10 @@ static int storvsc_probe(struct hv_device *device,
fc_host_node_name(host) = stor_device->node_name;
fc_host_port_name(host) = stor_device->port_name;
stor_device->rport = fc_remote_port_add(host, 0, &ids);
- if (!stor_device->rport)
+ if (!stor_device->rport) {
+ ret = -ENOMEM;
goto err_out4;
+ }
}
#endif
return 0;
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 277752b0fc6f..1a1b5d9fe514 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -157,6 +157,8 @@ enum {
#define UTP_TRANSFER_REQ_LIST_READY 0x2
#define UTP_TASK_REQ_LIST_READY 0x4
#define UIC_COMMAND_READY 0x8
+#define HOST_ERROR_INDICATOR 0x10
+#define DEVICE_ERROR_INDICATOR 0x20
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -185,6 +187,10 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
+#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
+#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
+#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
@@ -192,10 +198,20 @@ enum {
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR 0x80000000
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
+#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
/* UECT - Host UIC Error Code Transport Layer 44h */
#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
+#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
+#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
+#define UIC_TRANSPORT_BAD_TC 0x10
+#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
+#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
/* UECDME - Host UIC Error Code DME 48h */
#define UIC_DME_ERROR 0x80000000
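The new #defines above name individual error-code bits within the UIC error registers; bit 31 of each register flags the report as valid and the low bits carry the code. A runnable sketch of that decoding, using values mirroring the UECDL definitions (demo_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DEMO_UIC_ERROR            0x80000000u  /* bit 31: report valid */
#define DEMO_UIC_ERROR_CODE_MASK  0x7fffu      /* low bits: error code */
#define DEMO_ERR_RX_BUF_OF        0x20u        /* RX buffer overflow */

static void demo_decode_uecdl(uint32_t reg)
{
    uint32_t code;

    if (!(reg & DEMO_UIC_ERROR))
        return;                                /* nothing reported */
    code = reg & DEMO_UIC_ERROR_CODE_MASK;
    if (code & DEMO_ERR_RX_BUF_OF)
        printf("data link layer: RX buffer overflow\n");
}

int main(void)
{
    demo_decode_uecdl(DEMO_UIC_ERROR | DEMO_ERR_RX_BUF_OF);
    return 0;
}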
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2a9da2e0ea6b..2ba2b7b47f41 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -803,7 +803,9 @@ static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
static int wd719x_board_found(struct Scsi_Host *sh)
{
struct wd719x *wd = shost_priv(sh);
- char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
+ static const char * const card_types[] = {
+ "Unknown card", "WD7193", "WD7197", "WD7296"
+ };
int ret;
INIT_LIST_HEAD(&wd->active_scbs);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index d65345312527..7dcb14d303eb 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -27,6 +27,8 @@
#define DRIVER_NAME "armada_3700_spi"
+#define A3700_SPI_MAX_SPEED_HZ 100000000
+#define A3700_SPI_MAX_PRESCALE 30
#define A3700_SPI_TIMEOUT 10
/* SPI Register Offset */
@@ -184,12 +186,15 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
return 0;
}
-static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val |= A3700_SPI_FIFO_MODE;
+ if (enable)
+ val |= A3700_SPI_FIFO_MODE;
+ else
+ val &= ~A3700_SPI_FIFO_MODE;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
@@ -297,7 +302,7 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
- a3700_spi_fifo_mode_set(a3700_spi);
+ a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
a3700_spi_mode_set(a3700_spi, master->mode_bits);
@@ -416,15 +421,20 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
- unsigned int byte_len;
a3700_spi = spi_master_get_devdata(spi->master);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
- byte_len = xfer->bits_per_word >> 3;
+ /* Use 4-byte transfers. Each transfer method has its own way of dealing
+ * with the remaining bytes for transfers that are not 4-byte aligned.
+ */
+ a3700_spi_bytelen_set(a3700_spi, 4);
- a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+ /* Initialize the working buffers */
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
}
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
@@ -491,7 +501,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
u32 val;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
@@ -514,9 +524,8 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
- u32 data = le32_to_cpu(val);
- memcpy(a3700_spi->rx_buf, &data, 4);
+ memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
a3700_spi->rx_buf += 4;
@@ -579,27 +588,26 @@ static int a3700_spi_prepare_message(struct spi_master *master,
if (ret)
return ret;
- a3700_spi_bytelen_set(a3700_spi, 4);
-
a3700_spi_mode_set(a3700_spi, spi->mode);
return 0;
}
-static int a3700_spi_transfer_one(struct spi_master *master,
+static int a3700_spi_transfer_one_fifo(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
- unsigned int nbits = 0;
+ unsigned int nbits = 0, byte_len;
u32 val;
- a3700_spi_transfer_setup(spi, xfer);
+ /* Make sure we use FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, true);
- a3700_spi->tx_buf = xfer->tx_buf;
- a3700_spi->rx_buf = xfer->rx_buf;
- a3700_spi->buf_len = xfer->len;
+ /* Configure FIFO thresholds */
+ byte_len = xfer->bits_per_word >> 3;
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
@@ -615,6 +623,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
+ /* Clear the WFIFO, since its last 2 bytes are shifted out during
+ * a read operation.
+ */
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
@@ -729,6 +742,63 @@ out:
return ret;
}
+static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ u32 val;
+
+ /* Disable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, false);
+
+ while (a3700_spi->buf_len) {
+
+ /* When we have fewer than 4 bytes left to transfer, switch to
+ * 1-byte mode. This is reset after each transfer.
+ */
+ if (a3700_spi->buf_len < 4)
+ a3700_spi_bytelen_set(a3700_spi, 1);
+
+ if (a3700_spi->byte_len == 1)
+ val = *a3700_spi->tx_buf;
+ else
+ val = *(u32 *)a3700_spi->tx_buf;
+
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ /* Wait for all the data to be shifted in / out */
+ while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
+ A3700_SPI_XFER_DONE))
+ cpu_relax();
+
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+
+ memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
+
+ a3700_spi->buf_len -= a3700_spi->byte_len;
+ a3700_spi->tx_buf += a3700_spi->byte_len;
+ a3700_spi->rx_buf += a3700_spi->byte_len;
+
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ a3700_spi_transfer_setup(spi, xfer);
+
+ if (xfer->tx_buf && xfer->rx_buf)
+ return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+
+ return a3700_spi_transfer_one_fifo(master, spi, xfer);
+}
+
static int a3700_spi_unprepare_message(struct spi_master *master,
struct spi_message *message)
{
@@ -778,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->transfer_one = a3700_spi_transfer_one;
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
- master->flags = SPI_MASTER_HALF_DUPLEX;
master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
@@ -818,6 +887,11 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
+ master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ clk_get_rate(spi->clk));
+ master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ A3700_SPI_MAX_PRESCALE);
+
ret = a3700_spi_init(spi);
if (ret)
goto error_clk;
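The probe hunk above derives the advertised speed range from the input clock: output is capped at A3700_SPI_MAX_SPEED_HZ, and the largest prescaler (30) sets the slowest rate. A runnable sketch of the arithmetic, assuming a hypothetical 200 MHz input clock:

#include <stdio.h>

#define DEMO_MAX_SPEED_HZ 100000000UL   /* controller limit */
#define DEMO_MAX_PRESCALE 30UL          /* largest divider */

int main(void)
{
    unsigned long clk_rate = 200000000UL;       /* assumed input clock */
    unsigned long max_hz = clk_rate < DEMO_MAX_SPEED_HZ ?
                           clk_rate : DEMO_MAX_SPEED_HZ;
    /* DIV_ROUND_UP(clk_rate, DEMO_MAX_PRESCALE) */
    unsigned long min_hz = (clk_rate + DEMO_MAX_PRESCALE - 1) /
                           DEMO_MAX_PRESCALE;

    printf("max %lu Hz, min %lu Hz\n", max_hz, min_hz);
    return 0;
}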
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 669470971023..4a11fc0d4136 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -291,6 +291,10 @@ struct atmel_spi {
struct spi_transfer *current_transfer;
int current_remaining_bytes;
int done_status;
+ dma_addr_t dma_addr_rx_bbuf;
+ dma_addr_t dma_addr_tx_bbuf;
+ void *addr_rx_bbuf;
+ void *addr_tx_bbuf;
struct completion xfer_completion;
@@ -436,6 +440,11 @@ static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
spin_unlock_irqrestore(&as->lock, as->flags);
}
+static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
+{
+ return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
+}
+
static inline bool atmel_spi_use_dma(struct atmel_spi *as,
struct spi_transfer *xfer)
{
@@ -448,7 +457,12 @@ static bool atmel_spi_can_dma(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
- return atmel_spi_use_dma(as, xfer);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
+ return atmel_spi_use_dma(as, xfer) &&
+ !atmel_spi_is_vmalloc_xfer(xfer);
+ else
+ return atmel_spi_use_dma(as, xfer);
+
}
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
@@ -594,6 +608,11 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
+ if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
+ as->current_transfer->len);
+ }
complete(&as->xfer_completion);
}
@@ -744,17 +763,41 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
goto err_exit;
/* Send both scatterlists */
- rxdesc = dmaengine_prep_slave_sg(rxchan,
- xfer->rx_sg.sgl, xfer->rx_sg.nents,
- DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ rxdesc = dmaengine_prep_slave_single(rxchan,
+ as->dma_addr_rx_bbuf,
+ xfer->len,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!rxdesc)
goto err_dma;
- txdesc = dmaengine_prep_slave_sg(txchan,
- xfer->tx_sg.sgl, xfer->tx_sg.nents,
- DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
+ txdesc = dmaengine_prep_slave_single(txchan,
+ as->dma_addr_tx_bbuf,
+ xfer->len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!txdesc)
goto err_dma;
@@ -1426,27 +1469,7 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
-#ifdef CONFIG_SOC_SAM_V4_V5
- /*
- * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
- * since this later function tries to map buffers with dma_map_sg()
- * even if they have not been allocated inside DMA-safe areas.
- * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
- * those ARM cores, the data cache follows the PIPT model.
- * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
- * In case of PIPT caches, there cannot be cache aliases.
- * However on ARM9 cores, the data cache follows the VIVT model, hence
- * the cache aliases issue can occur when buffers are allocated from
- * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
- * not taken into account or at least not handled completely (cache
- * lines of aliases are not invalidated).
- * This is not a theorical issue: it was reproduced when trying to mount
- * a UBI file-system on a at91sam9g35ek board.
- */
- as->caps.has_dma_support = false;
-#else
as->caps.has_dma_support = version >= 0x212;
-#endif
as->caps.has_pdc_support = version < 0x212;
}
@@ -1592,6 +1615,30 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_pdc = true;
}
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_rx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_rx_bbuf) {
+ as->use_dma = false;
+ } else {
+ as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_tx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_tx_bbuf) {
+ as->use_dma = false;
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
+ }
+ if (!as->use_dma)
+ dev_info(master->dev.parent,
+ " can not allocate dma coherent memory\n");
+ }
+
if (as->caps.has_dma_support && !as->use_dma)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
@@ -1664,6 +1711,14 @@ static int atmel_spi_remove(struct platform_device *pdev)
if (as->use_dma) {
atmel_spi_stop_dma(master);
atmel_spi_release_dma(master);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_tx_bbuf,
+ as->dma_addr_tx_bbuf);
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
}
spin_lock_irq(&as->lock);
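The bounce-buffer code above replaces the blanket "no DMA on SAM9" rule whose rationale the removed comment gives: on those VIVT-cache SoCs only vmalloc()ed buffers are unsafe to map directly, so DMA stays enabled and such transfers are routed through dma_alloc_coherent() buffers. A kernel-style sketch of the decision:

#include <linux/mm.h>
#include <linux/spi/spi.h>

/* True when the transfer must go through the coherent bounce buffers:
 * vmalloc()ed memory can alias in a VIVT data cache, so mapping it for
 * DMA directly risks stale cache lines. */
static bool demo_needs_bounce(struct spi_transfer *xfer)
{
    return IS_ENABLED(CONFIG_SOC_SAM_V4_V5) &&
           (is_vmalloc_addr(xfer->tx_buf) ||
            is_vmalloc_addr(xfer->rx_buf));
}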
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 6e409eabe1c9..d02ceb7a29d1 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -27,8 +27,6 @@ struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
void __iomem *mmio_base;
-
- size_t read_offset;
bool bspi; /* Boot SPI mode with memory mapping */
};
@@ -172,8 +170,6 @@ static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
-
- b53spi->read_offset = len;
}
static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
@@ -182,10 +178,10 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
u32 tmp;
int i;
- for (i = 0; i < b53spi->read_offset + len; i++) {
+ for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
- if (!cont && i == b53spi->read_offset + len - 1)
+ if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
@@ -194,8 +190,7 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
- bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
- b53spi->read_offset + len - 1);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
@@ -214,13 +209,11 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
for (i = 0; i < len; ++i) {
- int offset = b53spi->read_offset + i;
+ u16 reg = B53SPI_MSPI_RXRAM + 4 * (1 + i * 2);
/* Data stored in the transmit register file LSB */
- r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
+ r_buf[i] = (u8)bcm53xxspi_read(b53spi, reg);
}
-
- b53spi->read_offset = 0;
}
static int bcm53xxspi_transfer_one(struct spi_master *master,
@@ -238,7 +231,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
left = t->len;
while (left) {
size_t to_write = min_t(size_t, 16, left);
- bool cont = left - to_write > 0;
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_write > 0;
bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
left -= to_write;
@@ -250,9 +244,9 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
buf = (u8 *)t->rx_buf;
left = t->len;
while (left) {
- size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
- left);
- bool cont = left - to_read > 0;
+ size_t to_read = min_t(size_t, 16, left);
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_read > 0;
bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
left -= to_read;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 6ddb6ef1fda4..60d59b003aa4 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -945,6 +945,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
+ init_completion(&dspi->done);
+
ret = platform_get_irq(pdev, 0);
if (ret == 0)
ret = -EINVAL;
@@ -1021,8 +1023,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
- init_completion(&dspi->done);
-
/* Reset In/OUT SPI module */
iowrite32(0, dspi->base + SPIGCR0);
udelay(100);
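The davinci hunks above move init_completion() ahead of the IRQ setup: a handler can fire as soon as the IRQ is requested (shared line or pending interrupt), so any state it touches must already be initialized. A kernel-style probe excerpt showing the safe ordering, with a hypothetical demo_spi_irq handler:

init_completion(&dspi->done);           /* used by the IRQ handler */

ret = devm_request_irq(&pdev->dev, irq, demo_spi_irq, 0,
                       dev_name(&pdev->dev), dspi);
if (ret)
    return ret;                         /* handler may run from here on */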
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b217c22ff72f..211cc7d75bf8 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -30,13 +30,11 @@
/* Slave spi_dev related */
struct chip_data {
- u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
- u8 enable_dma;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index f652f70cb8db..0630962ce442 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -903,10 +903,9 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = (void *)&vf610_data, },
- { .compatible = "fsl,ls1021a-v1.0-dspi",
- .data = (void *)&ls1021a_v1_data, },
- { .compatible = "fsl,ls2085a-dspi", .data = (void *)&ls2085a_data, },
+ { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
+ { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
+ { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -980,7 +979,7 @@ static int dspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->cleanup = dspi_cleanup;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 79ddefe4180d..6f57592a7f95 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1622,6 +1622,11 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+ ret = spi_bitbang_start(&spi_imx->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ goto out_clk_put;
+ }
/* Request GPIO CS lines, if any */
if (!spi_imx->slave_mode && master->cs_gpios) {
@@ -1640,12 +1645,6 @@ static int spi_imx_probe(struct platform_device *pdev)
}
}
- ret = spi_bitbang_start(&spi_imx->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
- goto out_clk_put;
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
@@ -1668,12 +1667,23 @@ static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
spi_bitbang_stop(&spi_imx->bitbang);
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_unprepare(spi_imx->clk_ipg);
- clk_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+ clk_disable_unprepare(spi_imx->clk_per);
spi_imx_sdma_exit(spi_imx);
spi_master_put(master);
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cebfea5faa4b..dafed6280df3 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -198,8 +198,10 @@ static int jcore_spi_probe(struct platform_device *pdev)
/* Register our spi controller */
err = devm_spi_register_master(&pdev->dev, master);
- if (err)
+ if (err) {
+ clk_disable(clk);
goto exit;
+ }
return 0;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7f8429635502..5c82910e3480 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -599,6 +599,7 @@ static int meson_spicc_remove(struct platform_device *pdev)
static const struct of_device_id meson_spicc_of_match[] = {
{ .compatible = "amlogic,meson-gx-spicc", },
+ { .compatible = "amlogic,meson-axg-spicc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 8974bb340b3a..deca63e82ff6 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -94,6 +94,7 @@ struct orion_spi {
struct spi_master *master;
void __iomem *base;
struct clk *clk;
+ struct clk *axi_clk;
const struct orion_spi_dev *devdata;
struct orion_direct_acc direct_access[ORION_NUM_CHIPSELECTS];
@@ -634,6 +635,16 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status)
goto out;
+ /* The following clock is only used by some SoCs */
+ spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(spi->axi_clk) &&
+ PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+ status = -EPROBE_DEFER;
+ goto out_rel_clk;
+ }
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
+
tclk_hz = clk_get_rate(spi->clk);
/*
@@ -658,7 +669,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(spi->base)) {
status = PTR_ERR(spi->base);
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
/* Scan all SPI devices of this controller for direct mapped devices */
@@ -696,7 +707,7 @@ static int orion_spi_probe(struct platform_device *pdev)
PAGE_SIZE);
if (!spi->direct_access[cs].vaddr) {
status = -ENOMEM;
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
spi->direct_access[cs].size = PAGE_SIZE;
@@ -724,6 +735,8 @@ static int orion_spi_probe(struct platform_device *pdev)
out_rel_pm:
pm_runtime_disable(&pdev->dev);
+out_rel_axi_clk:
+ clk_disable_unprepare(spi->axi_clk);
out_rel_clk:
clk_disable_unprepare(spi->clk);
out:
@@ -738,6 +751,7 @@ static int orion_spi_remove(struct platform_device *pdev)
struct orion_spi *spi = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
spi_unregister_master(master);
@@ -754,6 +768,7 @@ static int orion_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
return 0;
}
@@ -763,6 +778,8 @@ static int orion_spi_runtime_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
return clk_prepare_enable(spi->clk);
}
#endif
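The "axi" clock added above is optional hardware: only -EPROBE_DEFER from devm_clk_get() is fatal, while any other error just means this SoC has no such clock, and later uses are guarded by IS_ERR(). A kernel-style sketch of the optional-clock pattern:

spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
if (PTR_ERR(spi->axi_clk) == -EPROBE_DEFER)
    return -EPROBE_DEFER;               /* clock exists, not ready yet */
if (!IS_ERR(spi->axi_clk))
    clk_prepare_enable(spi->axi_clk);   /* only on SoCs that have it */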
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4cb515a3104c..b0822d1dba29 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1237,7 +1237,7 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
* different chip_info, release previously requested GPIO
*/
if (chip->gpiod_cs) {
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
chip->gpiod_cs = NULL;
}
@@ -1417,7 +1417,7 @@ static void cleanup(struct spi_device *spi)
if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
chip->gpiod_cs)
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
kfree(chip);
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index de7df20f8712..baa3a9fa2638 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,17 +1,7 @@
-/*
- * Copyright (C) 2009 Samsung Electronics Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2009 Samsung Electronics Co., Ltd.
+// Jaswinder Singh <jassi.brar@samsung.com>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index fcd261f98b9f..c5dcfb434a49 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -19,6 +19,7 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -55,9 +56,14 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
+ unsigned short unused_ss;
+ bool native_cs_inited;
+ bool native_cs_high;
bool slave_aborted;
};
+#define MAX_SS 3 /* Maximum number of native chip selects */
+
#define TMDR1 0x00 /* Transmit Mode Register 1 */
#define TMDR2 0x04 /* Transmit Mode Register 2 */
#define TMDR3 0x08 /* Transmit Mode Register 3 */
@@ -91,6 +97,8 @@ struct sh_msiof_spi_priv {
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
+#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
@@ -324,7 +332,7 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return val;
}
-static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
u32 cpol, u32 cpha,
u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
@@ -342,10 +350,13 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->master))
+ if (spi_controller_is_slave(p->master)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
- else
- sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ } else {
+ sh_msiof_write(p, TMDR1,
+ tmp | MDR1_TRMD | TMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ }
if (p->master->flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
@@ -528,8 +539,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-
- pm_runtime_get_sync(&p->pdev->dev);
+ u32 clr, set, tmp;
if (!np) {
/*
@@ -539,19 +549,31 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
spi->cs_gpio = (uintptr_t)spi->controller_data;
}
- /* Configure pins before deasserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ return 0;
+ }
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (spi_controller_is_slave(p->master))
+ return 0;
+ if (p->native_cs_inited &&
+ (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
+ return 0;
+ /* Configure native chip select mode/polarity early */
+ clr = MDR1_SYNCMD_MASK;
+ set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI;
+ if (spi->mode & SPI_CS_HIGH)
+ clr |= BIT(MDR1_SYNCAC_SHIFT);
+ else
+ set |= BIT(MDR1_SYNCAC_SHIFT);
+ pm_runtime_get_sync(&p->pdev->dev);
+ tmp = sh_msiof_read(p, TMDR1) & ~clr;
+ sh_msiof_write(p, TMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
-
+ p->native_cs_high = spi->mode & SPI_CS_HIGH;
+ p->native_cs_inited = true;
return 0;
}
@@ -560,13 +582,20 @@ static int sh_msiof_prepare_message(struct spi_master *master,
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
const struct spi_device *spi = msg->spi;
+ u32 ss, cs_high;
/* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ss = p->unused_ss;
+ cs_high = p->native_cs_high;
+ } else {
+ ss = spi->chip_select;
+ cs_high = !!(spi->mode & SPI_CS_HIGH);
+ }
+ sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
!!(spi->mode & SPI_CPHA),
!!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ !!(spi->mode & SPI_LSB_FIRST), cs_high);
return 0;
}
@@ -784,11 +813,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto stop_dma;
}
- /* wait for tx fifo to be emptied / rx fifo to be filled */
+ /* wait for tx/rx DMA completion */
ret = sh_msiof_wait_for_completion(p);
if (ret)
goto stop_reset;
+ if (!rx) {
+ reinit_completion(&p->done);
+ sh_msiof_write(p, IER, IER_TEOFE);
+
+ /* wait for tx fifo to be emptied */
+ ret = sh_msiof_wait_for_completion(p);
+ if (ret)
+ goto stop_reset;
+ }
+
/* clear status bits */
sh_msiof_reset_str(p);
@@ -912,9 +951,8 @@ static int sh_msiof_transfer_one(struct spi_master *master,
ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&p->pdev->dev),
- dev_name(&p->pdev->dev));
+ dev_warn_once(&p->pdev->dev,
+ "DMA not available, falling back to PIO\n");
break;
}
if (ret)
@@ -1071,6 +1109,45 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
+static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
+{
+ struct device *dev = &p->pdev->dev;
+ unsigned int used_ss_mask = 0;
+ unsigned int cs_gpios = 0;
+ unsigned int num_cs, i;
+ int ret;
+
+ ret = gpiod_count(dev, "cs");
+ if (ret <= 0)
+ return 0;
+
+ num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
+ for (i = 0; i < num_cs; i++) {
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
+ if (!IS_ERR(gpiod)) {
+ cs_gpios++;
+ continue;
+ }
+
+ if (PTR_ERR(gpiod) != -ENOENT)
+ return PTR_ERR(gpiod);
+
+ if (i >= MAX_SS) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ used_ss_mask |= BIT(i);
+ }
+ p->unused_ss = ffz(used_ss_mask);
+ if (cs_gpios && p->unused_ss >= MAX_SS) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1284,13 +1361,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
+ /* Setup GPIO chip selects */
+ master->num_chipselect = p->info->num_chipselect;
+ ret = sh_msiof_get_cs_gpios(p);
+ if (ret)
+ goto err1;
+
/* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
master->flags = chipdata->master_flags;
master->bus_num = pdev->id;
master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = p->info->num_chipselect;
master->setup = sh_msiof_spi_setup;
master->prepare_message = sh_msiof_prepare_message;
master->slave_abort = sh_msiof_slave_abort;
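sh_msiof_get_cs_gpios() above builds a mask of native chip selects that are actually in use and picks an unused one with ffz() for transfers driven by GPIO chip selects. A runnable sketch of that search, where demo_ffz() stands in for the kernel's ffz():

#include <stdio.h>

#define DEMO_MAX_SS 3   /* native chip selects on the controller */

/* Find-first-zero-bit, as ffz() does in the kernel. */
static unsigned int demo_ffz(unsigned int mask)
{
    unsigned int i;

    for (i = 0; mask & (1u << i); i++)
        ;
    return i;
}

int main(void)
{
    unsigned int used_ss_mask = 0x1;    /* native CS0 in use */
    unsigned int unused = demo_ffz(used_ss_mask);

    if (unused >= DEMO_MAX_SS)
        printf("no unused native chip select\n");
    else
        printf("parking GPIO transfers on native CS%u\n", unused);
    return 0;
}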
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index bbb1a275f718..f009d76f96b1 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1072,7 +1072,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct sirfsoc_spi *sspi;
struct spi_master *master;
struct resource *mem_res;
- struct sirf_spi_comp_data *spi_comp_data;
+ const struct sirf_spi_comp_data *spi_comp_data;
int irq;
int ret;
const struct of_device_id *match;
@@ -1092,7 +1092,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->fifo_full_offset = ilog2(sspi->fifo_size);
- spi_comp_data = (struct sirf_spi_comp_data *)match->data;
+ spi_comp_data = match->data;
sspi->regs = spi_comp_data->regs;
sspi->type = spi_comp_data->type;
sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fb38234249a8..8533f4edd00a 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -541,7 +541,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e0b9fe1d0e37..63fedc49ae9c 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -381,6 +381,7 @@ static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
}
static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e19e395b0e44..491b54d986eb 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2276,9 +2276,9 @@ done:
return retval;
}
-static unsigned int comedi_poll(struct file *file, poll_table *wait)
+static __poll_t comedi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct comedi_file *cfp = file->private_data;
struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s, *s_read;
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index cc18e25103ca..a557be8a5076 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -119,7 +119,7 @@ static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
poll_initwait(&table);
while (1) {
long elapsed;
- int mask;
+ __poll_t mask;
mask = f->f_op->poll(f, &table.pt);
if (mask & (POLLRDNORM | POLLRDBAND | POLLIN |
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
index b82a47b9ef0b..f1d128b2dae9 100644
--- a/drivers/staging/irda/net/af_irda.c
+++ b/drivers/staging/irda/net/af_irda.c
@@ -1737,12 +1737,12 @@ static int irda_shutdown(struct socket *sock, int how)
/*
* Function irda_poll (file, sock, wait)
*/
-static unsigned int irda_poll(struct file * file, struct socket *sock,
+static __poll_t irda_poll(struct file * file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.c b/drivers/staging/irda/net/irnet/irnet_ppp.c
index 7025dcb853d0..75bf9e34311d 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.c
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.c
@@ -419,12 +419,12 @@ irnet_ctrl_read(irnet_socket * ap,
* Poll : called when someone does a select on /dev/irnet.
* Just check if there are new events...
*/
-static inline unsigned int
+static inline __poll_t
irnet_ctrl_poll(irnet_socket * ap,
struct file * file,
poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap);
@@ -608,12 +608,12 @@ dev_irnet_read(struct file * file,
/*
* Poll : called when someone does a select on /dev/irnet
*/
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file * file,
poll_table * wait)
{
irnet_socket * ap = file->private_data;
- unsigned int mask;
+ __poll_t mask;
DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
file, ap);
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.h b/drivers/staging/irda/net/irnet/irnet_ppp.h
index 32061442cc8e..e6d5aa2a8aac 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.h
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.h
@@ -70,7 +70,7 @@ static ssize_t
char __user *,
size_t,
loff_t *);
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file *,
poll_table *);
static long
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index 2e5d311d2438..db81ed527452 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
@@ -120,6 +120,7 @@ static struct shash_alg alg = {
.cra_name = "adler32",
.cra_driver_name = "adler32-zlib",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 7d49d4865298..ed46aaca0ba3 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -314,19 +314,20 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
unsigned long then;
struct timeval tv;
+ struct kvec iov = {
+ .iov_base = buffer,
+ .iov_len = nob
+ };
+ struct msghdr msg = {
+ .msg_flags = 0
+ };
LASSERT(nob > 0);
LASSERT(jiffies_left > 0);
- for (;;) {
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = 0
- };
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);
+ for (;;) {
/* Set receive timeout to remaining time */
jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
@@ -338,7 +339,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
}
then = jiffies;
- rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
+ rc = sock_recvmsg(sock, &msg, 0);
jiffies_left -= jiffies - then;
if (rc < 0)
@@ -347,10 +348,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
if (!rc)
return -ECONNRESET;
- buffer = ((char *)buffer) + rc;
- nob -= rc;
-
- if (!nob)
+ if (!msg_data_left(&msg))
return 0;
if (jiffies_left <= 0)
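
The rework above builds the kvec/msghdr pair once and lets the iov_iter track progress across calls. A trimmed sketch of the same receive-loop pattern under this kernel's iov_iter API (foo_sock_read is a hypothetical caller, not from this patch):

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <net/sock.h>

/* Receive exactly @nob bytes; the iterator tracks progress, so no
 * manual buffer/nob arithmetic is needed between calls. */
static int foo_sock_read(struct socket *sock, void *buf, int nob)
{
	struct kvec iov = { .iov_base = buf, .iov_len = nob };
	struct msghdr msg = { .msg_flags = 0 };
	int rc;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);

	while (msg_data_left(&msg)) {
		rc = sock_recvmsg(sock, &msg, 0);
		if (rc < 0)
			return rc;
		if (!rc)
			return -ECONNRESET;	/* peer closed mid-read */
	}
	return 0;
}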
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 5b2e47c246f3..6f59045be0f9 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -369,8 +369,6 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
}
ctx->pos = pos;
ll_finish_md_op_data(op_data);
- filp->f_version = inode->i_version;
-
out:
if (!rc)
ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
@@ -1678,7 +1676,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
else
fd->lfd_pos = offset;
file->f_pos = offset;
- file->f_version = 0;
}
ret = offset;
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index b133fd00c08c..0d62fcf016dc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1296,15 +1296,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
ll_d2d(dentry)->lld_invalid = 1;
- /*
- * We should be careful about dentries created by d_obtain_alias().
- * These dentries are not put in the dentry tree, instead they are
- * linked to sb->s_anon through dentry->d_hash.
- * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
- * If we unhashed such a dentry, unmount would not be able to find
- * it and busy inodes would be reported.
- */
- if (d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
+ if (d_count(dentry) == 0)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index dd7596d8763d..6657ebbe068a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -1255,7 +1255,7 @@ static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma)
return videobuf_mmap_mapper(&pipe->outq, vma);
}
-static unsigned int atomisp_poll(struct file *file,
+static __poll_t atomisp_poll(struct file *file,
struct poll_table_struct *pt)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 5d3b0e5a1283..4ffff6f8b809 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2174,11 +2174,11 @@ static int bcm2048_fops_release(struct file *file)
return 0;
}
-static unsigned int bcm2048_fops_poll(struct file *file,
+static __poll_t bcm2048_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct bcm2048_device *bdev = video_drvdata(file);
- int retval = 0;
+ __poll_t retval = 0;
poll_wait(file, &bdev->read_queue, pts);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 155e8c758e4b..588743a6fd8a 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -573,7 +573,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll() - It is used for select/poll system call
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 6bd0717bf76e..a003603af725 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1197,12 +1197,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
}
/* copied from lirc_dev */
-static unsigned int poll(struct file *filep, poll_table *wait)
+static __poll_t poll(struct file *filep, poll_table *wait)
{
struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l->buf;
- unsigned int ret;
+ __poll_t ret;
dev_dbg(ir->dev, "%s called\n", __func__);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 9e2f0421a01e..a3a83424a926 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -1185,7 +1185,7 @@ static int iss_video_release(struct file *file)
return 0;
}
-static unsigned int iss_video_poll(struct file *file, poll_table *wait)
+static __poll_t iss_video_poll(struct file *file, poll_table *wait)
{
struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index 1e5cbc893496..69f530972273 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -287,10 +287,10 @@ aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
return copied;
}
-static unsigned int aim_poll(struct file *filp, poll_table *wait)
+static __poll_t aim_poll(struct file *filp, poll_table *wait)
{
struct aim_channel *c = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &c->wq, wait);
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
index e0748416aee5..7783bc2dd9f5 100644
--- a/drivers/staging/most/aim-v4l2/video.c
+++ b/drivers/staging/most/aim-v4l2/video.c
@@ -209,11 +209,11 @@ static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
return ret;
}
-static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+static __poll_t aim_vdev_poll(struct file *filp, poll_table *wait)
{
struct aim_fh *fh = filp->private_data;
struct most_video_dev *mdev = fh->mdev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* only wait if no data is available */
if (!data_ready(mdev))
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 87595c594b12..264ad362d858 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -637,8 +637,7 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
enable_hw_ecc = 1;
- chip->write_buf(mtd, p, eccsize * eccsteps);
- return 0;
+ return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
}
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
@@ -653,7 +652,7 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
enable_read_hw_ecc = 1;
- chip->read_buf(mtd, p, eccsize * eccsteps);
+ nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index d99daf69e501..585c6aa124cd 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -326,10 +326,10 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
return count;
}
-static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
+static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
{
unsigned long flags;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(fp, &speakup_event, wait);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index a3d4610fbdbe..4c8c6fa0a79f 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -134,7 +134,7 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
if (copied < 0)
return (int)copied;
- if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+ if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
return -EFAULT;
return copied;
@@ -146,7 +146,7 @@ static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
if (count > image[minor].size_buf)
count = image[minor].size_buf;
- if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+ if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
return -EFAULT;
return vme_master_write(image[minor].resource, image[minor].kern_buf,
@@ -159,7 +159,7 @@ static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+ if (copy_to_user(buf, image_ptr, (unsigned long)count))
return -EFAULT;
return count;
@@ -171,7 +171,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+ if (copy_from_user(image_ptr, buf, (unsigned long)count))
return -EFAULT;
return count;
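
The vme_user hunks above drop the unchecked __copy_* variants; plain copy_to_user()/copy_from_user() validate the user pointer themselves, as in this hypothetical sketch (foo_read_buf is illustrative, not from this patch):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* copy_to_user() performs the access_ok() check itself, so the
 * unchecked __copy_to_user() buys nothing here. */
static ssize_t foo_read_buf(char __user *buf, const void *kbuf,
			    size_t count)
{
	if (copy_to_user(buf, kbuf, count))
		return -EFAULT;
	return count;
}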
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index e2bc99980f75..4c44d7bed01a 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,6 +5,7 @@ menuconfig TARGET_CORE
select CONFIGFS_FS
select CRC_T10DIF
select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select SGL_ALLOC
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 58caacd54a3b..c03a78ee26cd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2300,13 +2300,7 @@ queue_full:
void target_free_sgl(struct scatterlist *sgl, int nents)
{
- struct scatterlist *sg;
- int count;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
-
- kfree(sgl);
+ sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);
@@ -2414,42 +2408,10 @@ int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
bool zero_page, bool chainable)
{
- struct scatterlist *sg;
- struct page *page;
- gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
- unsigned int nalloc, nent;
- int i = 0;
-
- nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
- if (chainable)
- nalloc++;
- sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- return -ENOMEM;
+ gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
- sg_init_table(sg, nalloc);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
- page = alloc_page(GFP_KERNEL | zero_flag);
- if (!page)
- goto out;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- return -ENOMEM;
+ *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
+ return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
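
target_alloc_sgl()/target_free_sgl() now delegate to the lib/scatterlist helpers enabled by the SGL_ALLOC select above. A hedged sketch of how another caller might use the same helpers directly (foo_* names are illustrative):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Allocate one page per entry (order 0), optionally zeroed. */
static int foo_map_buffer(struct scatterlist **sgl, unsigned int *nents,
			  u32 length)
{
	*sgl = sgl_alloc_order(length, 0, false /* chainable */,
			       GFP_KERNEL | __GFP_ZERO, nents);
	return *sgl ? 0 : -ENOMEM;
}

static void foo_unmap_buffer(struct scatterlist *sgl, int nents)
{
	sgl_free_n_order(sgl, nents, 0);	/* order must match alloc */
}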
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dc63aba092e4..dfd23245f778 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -88,7 +88,6 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
- * @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -104,7 +103,6 @@ struct cpufreq_cooling_device {
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- get_static_t plat_get_static_power;
};
static DEFINE_IDA(cpufreq_ida);
@@ -319,60 +317,6 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
}
/**
- * get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
- * @tz: thermal zone device in which we're operating
- * @freq: frequency in KHz
- * @power: pointer in which to store the calculated static power
- *
- * Calculate the static power consumed by the cpus described by
- * @cpu_actor running at frequency @freq. This function relies on a
- * platform specific function that should have been provided when the
- * actor was registered. If it wasn't, the static power is assumed to
- * be negligible. The calculated static power is stored in @power.
- *
- * Return: 0 on success, -E* on failure.
- */
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
- struct thermal_zone_device *tz, unsigned long freq,
- u32 *power)
-{
- struct dev_pm_opp *opp;
- unsigned long voltage;
- struct cpufreq_policy *policy = cpufreq_cdev->policy;
- struct cpumask *cpumask = policy->related_cpus;
- unsigned long freq_hz = freq * 1000;
- struct device *dev;
-
- if (!cpufreq_cdev->plat_get_static_power) {
- *power = 0;
- return 0;
- }
-
- dev = get_cpu_device(policy->cpu);
- WARN_ON(!dev);
-
- opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
- if (IS_ERR(opp)) {
- dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
- freq_hz, PTR_ERR(opp));
- return -EINVAL;
- }
-
- voltage = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (voltage == 0) {
- dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
- freq_hz);
- return -EINVAL;
- }
-
- return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
- voltage, power);
-}
-
-/**
* get_dynamic_power() - calculate the dynamic power
* @cpufreq_cdev: &cpufreq_cooling_device for this cdev
* @freq: current frequency
@@ -491,8 +435,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 *power)
{
unsigned long freq;
- int i = 0, cpu, ret;
- u32 static_power, dynamic_power, total_load = 0;
+ int i = 0, cpu;
+ u32 total_load = 0;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
u32 *load_cpu = NULL;
@@ -522,22 +466,15 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
cpufreq_cdev->last_load = total_load;
- dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret) {
- kfree(load_cpu);
- return ret;
- }
+ *power = get_dynamic_power(cpufreq_cdev, freq);
if (load_cpu) {
trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
- load_cpu, i, dynamic_power,
- static_power);
+ load_cpu, i, *power);
kfree(load_cpu);
}
- *power = static_power + dynamic_power;
return 0;
}
@@ -561,8 +498,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus;
- u32 static_power, dynamic_power;
- int ret;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
@@ -572,13 +507,9 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
freq = cpufreq_cdev->freq_table[state].frequency;
- dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret)
- return ret;
+ *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- *power = static_power + dynamic_power;
- return ret;
+ return 0;
}
/**
@@ -606,21 +537,14 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
unsigned int cur_freq, target_freq;
- int ret;
- s32 dyn_power;
- u32 last_load, normalised_power, static_power;
+ u32 last_load, normalised_power;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
cur_freq = cpufreq_quick_get(policy->cpu);
- ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
- if (ret)
- return ret;
-
- dyn_power = power - static_power;
- dyn_power = dyn_power > 0 ? dyn_power : 0;
+ power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
- normalised_power = (dyn_power * 100) / last_load;
+ normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
*state = get_level(cpufreq_cdev, target_freq);
@@ -671,8 +595,6 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @policy: cpufreq policy
* Normally this should be same as cpufreq policy->related_cpus.
* @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -684,8 +606,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
+ struct cpufreq_policy *policy, u32 capacitance)
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
@@ -755,8 +676,6 @@ __cpufreq_cooling_register(struct device_node *np,
}
if (capacitance) {
- cpufreq_cdev->plat_get_static_power = plat_static_func;
-
ret = update_freq_table(cpufreq_cdev, capacitance);
if (ret) {
cdev = ERR_PTR(ret);
@@ -813,13 +732,12 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+ return __cpufreq_cooling_register(NULL, policy, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
- * @np: a valid struct device_node to the cooling device device tree node
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
@@ -827,86 +745,45 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
* cooling devices. Using this API, the cpufreq cooling device will be
* linked to the device tree node provided.
*
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- if (!np)
- return ERR_PTR(-EINVAL);
-
- return __cpufreq_cooling_register(np, policy, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
-
-/**
- * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this function, the
- * cooling device will implement the power extensions by using a
- * simple cpu power model. The cpus must have registered their OPPs
- * using the OPP library.
- *
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
- *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
-{
- return __cpufreq_cooling_register(NULL, policy, capacitance,
- plat_static_func);
-}
-EXPORT_SYMBOL(cpufreq_power_cooling_register);
-
-/**
- * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @np: a valid struct device_node to the cooling device device tree node
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this API, the cpufreq
- * cooling device will be linked to the device tree node provided.
* Using this function, the cooling device will implement the power
* extensions by using a simple cpu power model. The cpus must have
* registered their OPPs using the OPP library.
*
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
+ * If the relevant property is present in the policy's CPU node, the
+ * static power consumed by the CPU is also taken into account.
*
* Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
+ * and NULL on failure.
*/
struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- if (!np)
- return ERR_PTR(-EINVAL);
+ struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
+ struct thermal_cooling_device *cdev = NULL;
+ u32 capacitance = 0;
+
+ if (!np) {
+ pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ policy->cpu);
+ return NULL;
+ }
+
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &capacitance);
- return __cpufreq_cooling_register(np, policy, capacitance,
- plat_static_func);
+ cdev = __cpufreq_cooling_register(np, policy, capacitance);
+ if (IS_ERR(cdev)) {
+ pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
+ policy->cpu, PTR_ERR(cdev));
+ cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+ return cdev;
}
-EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
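
After this rework, of_cpufreq_cooling_register() takes only the policy and returns NULL rather than an ERR_PTR on failure. A sketch of a hypothetical cpufreq driver ->ready() hook using the new signature (foo_cpufreq_ready is illustrative, not from this patch):

#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>

/* ->ready() runs once the policy is fully initialized; the OF node and
 * the dynamic-power-coefficient property are now looked up internally. */
static void foo_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct thermal_cooling_device *cdev;

	cdev = of_cpufreq_cooling_register(policy);
	if (!cdev)				/* NULL, not ERR_PTR */
		pr_debug("cpu%d: no cooling device\n", policy->cpu);
}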
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 5131bdc9e765..0edf4fcfea23 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2457,10 +2457,10 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
* Called without the kernel lock held - fine
*/
-static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct gsm_mux *gsm = tty->disc_data;
poll_wait(file, &tty->read_wait, wait);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index eea7b6cb3cc4..929434ebee50 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -180,7 +180,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
@@ -796,11 +796,11 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
* to caller.
* Returns a bit mask containing info on which ops will not block.
*/
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait)
{
struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_poll() called\n",__FILE__,__LINE__);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 30bb0900cd2f..e81d3db8ad63 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -135,7 +135,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
static int r3964_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count);
@@ -1216,14 +1216,14 @@ static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/* Called without the kernel lock held - fine */
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait)
{
struct r3964_info *pInfo = tty->disc_data;
struct r3964_client_info *pClient;
struct r3964_message *pMsg = NULL;
unsigned long flags;
- int result = POLLOUT;
+ __poll_t result = POLLOUT;
TRACE_L("POLL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 539b49adb6af..478a9b40fd03 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2368,10 +2368,10 @@ break_out:
* Called without the kernel lock held - fine
*/
-static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 3c573a8caa80..5a135ed46159 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -132,6 +132,33 @@ void serdev_device_close(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_close);
+static void devm_serdev_device_release(struct device *dev, void *dr)
+{
+ serdev_device_close(*(struct serdev_device **)dr);
+}
+
+int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
+{
+ struct serdev_device **dr;
+ int ret;
+
+ dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = serdev;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_serdev_device_open);
+
void serdev_device_write_wakeup(struct serdev_device *serdev)
{
complete(&serdev->write_comp);
@@ -280,8 +307,8 @@ static int serdev_drv_probe(struct device *dev)
static int serdev_drv_remove(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
-
- sdrv->remove(to_serdev_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_serdev_device(dev));
return 0;
}
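
devm_serdev_device_open() added above ties the open port to the consumer device's lifetime. A sketch of a hypothetical consumer probe (foo_serdev_probe and the baud rate are illustrative, not from this patch):

#include <linux/serdev.h>

/* The port is closed automatically on driver detach, removing the
 * usual error-path and remove() bookkeeping. */
static int foo_serdev_probe(struct serdev_device *serdev)
{
	int ret;

	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	return 0;
}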
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index dc60aeea87d8..00d14d6a76bb 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -144,7 +144,7 @@ static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,
size_t, loff_t *);
-static unsigned int tty_poll(struct file *, poll_table *);
+static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -443,7 +443,7 @@ static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
}
/* No kernel lock held - none needed ;) */
-static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
+static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
{
return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
}
@@ -2055,11 +2055,11 @@ retry_open:
* may be re-entered freely by other callers.
*/
-static unsigned int tty_poll(struct file *filp, poll_table *wait)
+static __poll_t tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
- int ret = 0;
+ __poll_t ret = 0;
if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
return 0;
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 85b6634f518a..3e64ccd0040f 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -559,11 +559,11 @@ unlock_out:
return ret;
}
-static unsigned int
+static __poll_t
vcs_poll(struct file *file, poll_table *wait)
{
struct vcs_poll_data *poll = vcs_poll_data_get(file);
- int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
+ __poll_t ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
if (poll) {
poll_wait(file, &poll->waitq, wait);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ff04b7f8549f..85bc1aaea4a4 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -496,7 +496,7 @@ static int uio_release(struct inode *inode, struct file *filep)
return ret;
}
-static unsigned int uio_poll(struct file *filep, poll_table *wait)
+static __poll_t uio_poll(struct file *filep, poll_table *wait)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6c181a625daf..9627ea6ec3ae 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -595,11 +595,11 @@ static int wdm_flush(struct file *file, fl_owner_t id)
return usb_translate_errors(desc->werr);
}
-static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
{
struct wdm_device *desc = file->private_data;
unsigned long flags;
- unsigned int mask = 0;
+ __poll_t mask = 0;
spin_lock_irqsave(&desc->iuspin, flags);
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index c454885ef4a0..f45e8877771a 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -469,9 +469,9 @@ static int usblp_release(struct inode *inode, struct file *file)
}
/* No kernel lock - fine */
-static unsigned int usblp_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
{
- int ret;
+ __poll_t ret;
unsigned long flags;
struct usblp *usblp = file->private_data;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 0b8b0f3bdd2f..7ea67a55be10 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1257,10 +1257,10 @@ static int usbtmc_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &data->fasync);
}
-static unsigned int usbtmc_poll(struct file *file, poll_table *wait)
+static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
{
struct usbtmc_device_data *data = file->private_data;
- unsigned int mask;
+ __poll_t mask;
mutex_lock(&data->io_mutex);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index c2cf62b7043a..e2cec448779e 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -622,7 +622,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
}
/* Kernel lock for "lastev" protection */
-static unsigned int usb_device_poll(struct file *file,
+static __poll_t usb_device_poll(struct file *file,
struct poll_table_struct *wait)
{
unsigned int event_count;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a3fad4ec9870..5e72bf36aca4 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -595,7 +595,7 @@ static void async_completed(struct urb *urb)
as->status = urb->status;
signr = as->signr;
if (signr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = as->signr;
sinfo.si_errno = as->status;
sinfo.si_code = SI_ASYNCIO;
@@ -2572,11 +2572,11 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
#endif
/* No kernel lock - fine */
-static unsigned int usbdev_poll(struct file *file,
+static __poll_t usbdev_poll(struct file *file,
struct poll_table_struct *wait)
{
struct usb_dev_state *ps = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &ps->wait, wait);
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
@@ -2613,7 +2613,7 @@ static void usbdev_remove(struct usb_device *udev)
wake_up_all(&ps->wait);
list_del_init(&ps->list);
if (ps->discsignr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = ps->discsignr;
sinfo.si_errno = EPIPE;
sinfo.si_code = SI_ASYNCIO;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index b6cf5ab5a0a1..b540935891af 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -638,10 +638,10 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
return ret;
}
-static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
+static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
{
struct ffs_data *ffs = file->private_data;
- unsigned int mask = POLLWRNORM;
+ __poll_t mask = POLLWRNORM;
int ret;
poll_wait(file, &ffs->ev.waitq, wait);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index daae35318a3a..a73efb1c47d0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -413,10 +413,10 @@ release_write_pending_unlocked:
return status;
}
-static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
+static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index c5bce8e22983..5780fba620ab 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -73,9 +73,7 @@ struct f_ncm {
struct sk_buff *skb_tx_ndp;
u16 ndp_dgram_count;
bool timer_force_tx;
- struct tasklet_struct tx_tasklet;
struct hrtimer task_timer;
-
bool timer_stopping;
};
@@ -1104,7 +1102,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
/* Delay the timer. */
hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_SOFT);
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
@@ -1148,17 +1146,15 @@ err:
}
/*
- * This transmits the NTB if there are frames waiting.
+ * The transmit should only be run if no skb data has been sent
+ * for a certain duration.
*/
-static void ncm_tx_tasklet(unsigned long data)
+static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
- struct f_ncm *ncm = (void *)data;
-
- if (ncm->timer_stopping)
- return;
+ struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
/* Only send if data is available. */
- if (ncm->skb_tx_data) {
+ if (!ncm->timer_stopping && ncm->skb_tx_data) {
ncm->timer_force_tx = true;
/* XXX This allowance of a NULL skb argument to ndo_start_xmit
@@ -1171,16 +1167,6 @@ static void ncm_tx_tasklet(unsigned long data)
ncm->timer_force_tx = false;
}
-}
-
-/*
- * The transmit should only be run if no skb data has been sent
- * for a certain duration.
- */
-static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
-{
- struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
- tasklet_schedule(&ncm->tx_tasklet);
return HRTIMER_NORESTART;
}
@@ -1513,8 +1499,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm);
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
@@ -1623,7 +1608,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
hrtimer_cancel(&ncm->task_timer);
- tasklet_kill(&ncm->tx_tasklet);
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
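
The f_ncm change moves the timer to _SOFT mode, whose callback runs in softirq context, so the tasklet trampoline can be dropped. A minimal sketch of the pattern (foo_* names and the 1 ms period are illustrative, not from this patch):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* A _SOFT timer's callback runs in softirq context, so work that once
 * needed a tasklet trampoline can run in the callback itself. */
static enum hrtimer_restart foo_timeout(struct hrtimer *t)
{
	/* softirq-level work goes here directly */
	return HRTIMER_NORESTART;
}

static void foo_arm(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	timer->function = foo_timeout;
	hrtimer_start(timer, ms_to_ktime(1), HRTIMER_MODE_REL_SOFT);
}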
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index dd607b99eb1d..453578c4af69 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -680,12 +680,12 @@ printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
return 0;
}
-static unsigned int
+static __poll_t
printer_poll(struct file *fd, poll_table *wait)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
- int status = 0;
+ __poll_t status = 0;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 278d50ff1eea..9e33d5206d54 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -193,7 +193,7 @@ int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
* This function implements video queue polling and is intended to be used by
* the device poll handler.
*/
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
return vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 51ee94e5cf2b..f9f65b5c1062 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -72,7 +72,7 @@ int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *buf, int nonblocking);
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index f3069db6f08e..9a9019625496 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -329,7 +329,7 @@ uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvcg_queue_mmap(&uvc->video.queue, vma);
}
-static unsigned int
+static __poll_t
uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9343ec436485..05691254d473 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1209,11 +1209,11 @@ dev_release (struct inode *inode, struct file *fd)
return 0;
}
-static unsigned int
+static __poll_t
ep0_poll (struct file *fd, poll_table *wait)
{
struct dev_data *dev = fd->private_data;
- int mask = 0;
+ __poll_t mask = 0;
if (dev->state <= STATE_DEV_OPENED)
return DEFAULT_POLLMASK;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index ad3109490c0f..1fa00b35f4ad 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -677,10 +677,10 @@ static int iowarrior_release(struct inode *inode, struct file *file)
return retval;
}
-static unsigned iowarrior_poll(struct file *file, poll_table * wait)
+static __poll_t iowarrior_poll(struct file *file, poll_table * wait)
{
struct iowarrior *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!dev->present)
return POLLERR | POLLHUP;
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 5c1a3b852453..074398c1e410 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -409,10 +409,10 @@ exit:
/**
* ld_usb_poll
*/
-static unsigned int ld_usb_poll(struct file *file, poll_table *wait)
+static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
{
struct ld_usb *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c5be6e9e24a5..941c45028828 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -224,7 +224,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
static inline void tower_delete (struct lego_usb_tower *dev);
static int tower_open (struct inode *inode, struct file *file);
static int tower_release (struct inode *inode, struct file *file);
-static unsigned int tower_poll (struct file *file, poll_table *wait);
+static __poll_t tower_poll (struct file *file, poll_table *wait);
static loff_t tower_llseek (struct file *file, loff_t off, int whence);
static void tower_abort_transfers (struct lego_usb_tower *dev);
@@ -509,10 +509,10 @@ static void tower_check_for_read_packet (struct lego_usb_tower *dev)
/**
* tower_poll
*/
-static unsigned int tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll (struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f932f40302df..cc5b296bff3f 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1191,11 +1191,11 @@ static long mon_bin_compat_ioctl(struct file *file,
}
#endif /* CONFIG_COMPAT */
-static unsigned int
+static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
struct mon_reader_bin *rp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (file->f_mode & FMODE_READ)
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 4797217e5e72..8cc4b48ff127 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -46,7 +46,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
if (flags & POLLIN) {
/* An event has been signaled, call function */
@@ -113,7 +113,7 @@ int vfio_virqfd_enable(void *opaque,
struct eventfd_ctx *ctx;
struct virqfd *virqfd;
int ret = 0;
- unsigned int events;
+ __poll_t events;
virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
if (!virqfd)
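
virqfd_wakeup() now decodes the wait-queue key with key_to_poll() instead of casting through unsigned long. A sketch of the same pattern in a hypothetical callback (foo_wakeup is illustrative, not from this patch):

#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/wait.h>

/* The opaque key carries poll flags; key_to_poll() keeps the __poll_t
 * annotation intact end to end. */
static int foo_wakeup(wait_queue_entry_t *wait, unsigned int mode,
		      int sync, void *key)
{
	__poll_t flags = key_to_poll(key);

	if (flags & POLLIN)
		pr_debug("readable\n");		/* data available */
	if (flags & POLLHUP)
		pr_debug("hangup\n");		/* peer went away */
	return 0;
}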
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 80323948c0dd..9c3f8160ef24 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1373,7 +1373,7 @@ static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
return vhost_chr_write_iter(dev, from);
}
-static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
+static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
struct vhost_net *n = file->private_data;
struct vhost_dev *dev = &n->dev;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5727b186b3ca..8d4374606756 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -170,7 +170,7 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
- if (!((unsigned long)key & poll->mask))
+ if (!(key_to_poll(key) & poll->mask))
return 0;
vhost_poll_queue(poll);
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev)
+ __poll_t mask, struct vhost_dev *dev)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
* keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
- unsigned long mask;
+ __poll_t mask;
int ret = 0;
if (poll->wqh)
@@ -211,7 +211,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
mask = file->f_op->poll(file, &poll->table);
if (mask)
- vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+ vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & POLLERR) {
if (poll->wqh)
remove_wait_queue(poll->wqh, &poll->wait);
@@ -1061,10 +1061,10 @@ done:
}
EXPORT_SYMBOL(vhost_chr_write_iter);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &dev->wait, wait);
@@ -1881,12 +1881,7 @@ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = vhost16_to_cpu(vq, desc->next);
- /* Make sure compiler knows to grab that: we don't want it changing! */
- /* We will use the result as an index in an array, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
+ next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
return next;
}
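
The next_desc() change replaces read_barrier_depends() with READ_ONCE() so the guest-shared field is loaded exactly once before validation. A sketch of the idiom (foo_next_index is illustrative, not from this patch):

#include <linux/compiler.h>
#include <linux/types.h>

/* Load a shared-memory index exactly once, then validate the snapshot
 * before using it; the compiler may not refetch *slot afterwards. */
static unsigned int foo_next_index(const u16 *slot, unsigned int max)
{
	unsigned int next = READ_ONCE(*slot);

	return next < max ? next : max - 1;	/* clamp before array use */
}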
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 79c6e7a60a5e..7876a3d7d1b3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -34,7 +34,7 @@ struct vhost_poll {
wait_queue_head_t *wqh;
wait_queue_entry_t wait;
struct vhost_work work;
- unsigned long mask;
+ __poll_t mask;
struct vhost_dev *dev;
};
@@ -43,7 +43,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev);
+ __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
@@ -217,7 +217,7 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
int noblock);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index d84329676689..6a34ab936726 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -143,7 +143,7 @@ static int apple_bl_add(struct acpi_device *dev)
struct pci_dev *host;
int intensity;
- host = pci_get_bus_and_slot(0, 0);
+ host = pci_get_domain_bus_and_slot(0, 0, 0);
if (!host) {
pr_err("unable to find PCI host\n");
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c239ea3d09..f5574060f9c8 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = lcd->buf,
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index eab1f842f9c0..e4bd63e9db6b 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
spi_message_init(m);
- x->cs_change = 1;
+ x->cs_change = 0;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 6a41ea92737a..4dc5ee8debeb 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = buf,
};
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index cda7587cbc86..e707e617bf1c 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -556,7 +556,7 @@ static void __init iounmap_macfb(void)
static int __init macfb_init(void)
{
int video_cmap_len, video_is_nubus = 0;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
char *option = NULL;
int err;
@@ -670,15 +670,17 @@ static int __init macfb_init(void)
* code is really broken :-)
*/
- while ((ndev = nubus_find_type(NUBUS_CAT_DISPLAY,
- NUBUS_TYPE_VIDEO, ndev)))
- {
+ for_each_func_rsrc(ndev) {
unsigned long base = ndev->board->slot_addr;
if (mac_bi_data.videoaddr < base ||
mac_bi_data.videoaddr - base > 0xFFFFFF)
continue;
+ if (ndev->category != NUBUS_CAT_DISPLAY ||
+ ndev->type != NUBUS_TYPE_VIDEO)
+ continue;
+
video_is_nubus = 1;
slot_addr = (unsigned char *)base;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d70ad6d38879..b0597bef4555 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -565,11 +565,11 @@ static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
/*
* Returns a bitmask indicating whether a read will block
*/
-static unsigned int fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
{
struct doorbell_queue *dbq = filp->private_data;
unsigned long flags;
- unsigned int mask;
+ __poll_t mask;
spin_lock_irqsave(&dbq->lock, flags);
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a90728ceec5a..55e11bf8ebaf 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -13,9 +13,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/w1-gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/delay.h>
@@ -30,11 +29,17 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
pdata->pullup_duration = delay;
} else {
if (pdata->pullup_duration) {
- gpio_direction_output(pdata->pin, 1);
-
+ /*
+ * This will OVERRIDE open drain emulation and force-pull
+ * the line high for some time.
+ */
+ gpiod_set_raw_value(pdata->gpiod, 1);
msleep(pdata->pullup_duration);
-
- gpio_direction_input(pdata->pin);
+ /*
+ * This will simply set the line as input since we are doing
+ * open drain emulation in the GPIO library.
+ */
+ gpiod_set_value(pdata->gpiod, 1);
}
pdata->pullup_duration = 0;
}
@@ -42,28 +47,18 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
return 0;
}
-static void w1_gpio_write_bit_dir(void *data, u8 bit)
-{
- struct w1_gpio_platform_data *pdata = data;
-
- if (bit)
- gpio_direction_input(pdata->pin);
- else
- gpio_direction_output(pdata->pin, 0);
-}
-
-static void w1_gpio_write_bit_val(void *data, u8 bit)
+static void w1_gpio_write_bit(void *data, u8 bit)
{
struct w1_gpio_platform_data *pdata = data;
- gpio_set_value(pdata->pin, bit);
+ gpiod_set_value(pdata->gpiod, bit);
}
static u8 w1_gpio_read_bit(void *data)
{
struct w1_gpio_platform_data *pdata = data;
- return gpio_get_value(pdata->pin) ? 1 : 0;
+ return gpiod_get_value(pdata->gpiod) ? 1 : 0;
}
#if defined(CONFIG_OF)
@@ -74,107 +69,85 @@ static const struct of_device_id w1_gpio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
#endif
-static int w1_gpio_probe_dt(struct platform_device *pdev)
-{
- struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
- int gpio;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- if (of_get_property(np, "linux,open-drain", NULL))
- pdata->is_open_drain = 1;
-
- gpio = of_get_gpio(np, 0);
- if (gpio < 0) {
- if (gpio != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to parse gpio property for data pin (%d)\n",
- gpio);
-
- return gpio;
- }
- pdata->pin = gpio;
-
- gpio = of_get_gpio(np, 1);
- if (gpio == -EPROBE_DEFER)
- return gpio;
- /* ignore other errors as the pullup gpio is optional */
- pdata->ext_pullup_enable_pin = gpio;
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-
static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ /* Enforce open drain mode by default */
+ enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
int err;
if (of_have_populated_dt()) {
- err = w1_gpio_probe_dt(pdev);
- if (err < 0)
- return err;
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /*
+	 * This parameter means that something other than the gpiolib has
+	 * already set the line into open drain mode, so we should just
+	 * drive it high/low like we are in full control of the line and
+ * open drain will happen transparently.
+ */
+ if (of_get_property(np, "linux,open-drain", NULL))
+ gflags = GPIOD_OUT_LOW;
+
+ pdev->dev.platform_data = pdata;
}
-
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "No configuration data\n");
+ dev_err(dev, "No configuration data\n");
return -ENXIO;
}
- master = devm_kzalloc(&pdev->dev, sizeof(struct w1_bus_master),
+ master = devm_kzalloc(dev, sizeof(struct w1_bus_master),
GFP_KERNEL);
if (!master) {
- dev_err(&pdev->dev, "Out of memory\n");
+ dev_err(dev, "Out of memory\n");
return -ENOMEM;
}
- err = devm_gpio_request(&pdev->dev, pdata->pin, "w1");
- if (err) {
- dev_err(&pdev->dev, "gpio_request (pin) failed\n");
- return err;
+ pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
+ if (IS_ERR(pdata->gpiod)) {
+ dev_err(dev, "gpio_request (pin) failed\n");
+ return PTR_ERR(pdata->gpiod);
}
- if (gpio_is_valid(pdata->ext_pullup_enable_pin)) {
- err = devm_gpio_request_one(&pdev->dev,
- pdata->ext_pullup_enable_pin, GPIOF_INIT_LOW,
- "w1 pullup");
- if (err < 0) {
- dev_err(&pdev->dev, "gpio_request_one "
- "(ext_pullup_enable_pin) failed\n");
- return err;
- }
+ pdata->pullup_gpiod =
+ devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->pullup_gpiod)) {
+ dev_err(dev, "gpio_request_one "
+ "(ext_pullup_enable_pin) failed\n");
+ return PTR_ERR(pdata->pullup_gpiod);
}
master->data = pdata;
master->read_bit = w1_gpio_read_bit;
-
- if (pdata->is_open_drain) {
- gpio_direction_output(pdata->pin, 1);
- master->write_bit = w1_gpio_write_bit_val;
- } else {
- gpio_direction_input(pdata->pin);
- master->write_bit = w1_gpio_write_bit_dir;
+ gpiod_direction_output(pdata->gpiod, 1);
+ master->write_bit = w1_gpio_write_bit;
+
+ /*
+ * If we are using open drain emulation from the GPIO library,
+ * we need to use this pullup function that hammers the line
+ * high using a raw accessor to provide pull-up for the w1
+ * line.
+ */
+ if (gflags == GPIOD_OUT_LOW_OPEN_DRAIN)
master->set_pullup = w1_gpio_set_pullup;
- }
err = w1_add_master_device(master);
if (err) {
- dev_err(&pdev->dev, "w1_add_master device failed\n");
+ dev_err(dev, "w1_add_master device failed\n");
return err;
}
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 1);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 1);
platform_set_drvdata(pdev, master);
@@ -189,8 +162,8 @@ static int w1_gpio_remove(struct platform_device *pdev)
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 0);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 0);
w1_remove_master_device(master);
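
The w1-gpio conversion requests the line with open-drain semantics up front, so the driver only sets values instead of flipping direction by hand. A sketch of the request pattern (foo_request_line is illustrative, not from this patch):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Open-drain emulation is requested at gpiod_get time, so the driver
 * only sets values and never flips pin direction itself. */
static int foo_request_line(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_index(dev, NULL, 0,
				     GPIOD_OUT_LOW_OPEN_DRAIN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	gpiod_set_value(gpiod, 1);	/* "high" = line released to float */
	*out = gpiod;
	return 0;
}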
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ca200d1f310a..5bf613d3b7d6 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,13 @@ config ZIIRAVE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called ziirave_wdt.
+config RAVE_SP_WATCHDOG
+ tristate "RAVE SP Watchdog timer"
+ depends on RAVE_SP_CORE
+ select WATCHDOG_CORE
+ help
+	  Support for the watchdog on the RAVE SP device.
+
# ALPHA Architecture
# ARM Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 715a21078e0c..135c5e81f25e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -224,3 +224,4 @@ obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
+obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
new file mode 100644
index 000000000000..35db173252f9
--- /dev/null
+++ b/drivers/watchdog/rave-sp-wdt.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Driver for the watchdog aspect of the Zodiac Inflight Innovations
+ * RAVE Supervisory Processor (SP) MCU
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovation
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+enum {
+ RAVE_SP_RESET_BYTE = 1,
+ RAVE_SP_RESET_REASON_NORMAL = 0,
+ RAVE_SP_RESET_DELAY_MS = 500,
+};
+
+/**
+ * struct rave_sp_wdt_variant - RAVE SP watchdog variant
+ *
+ * @max_timeout: Largest possible watchdog timeout setting
+ * @min_timeout: Smallest possible watchdog timeout setting
+ *
+ * @configure: Function to send configuration command
+ * @restart: Function to send "restart" command
+ */
+struct rave_sp_wdt_variant {
+ unsigned int max_timeout;
+ unsigned int min_timeout;
+
+ int (*configure)(struct watchdog_device *, bool);
+ int (*restart)(struct watchdog_device *);
+};
+
+/**
+ * struct rave_sp_wdt - RAVE SP watchdog
+ *
+ * @wdd: Underlying watchdog device
+ * @sp: Pointer to parent RAVE SP device
+ * @variant: Device specific variant information
+ * @reboot_notifier: Reboot notifier implementing machine reset
+ */
+struct rave_sp_wdt {
+ struct watchdog_device wdd;
+ struct rave_sp *sp;
+ const struct rave_sp_wdt_variant *variant;
+ struct notifier_block reboot_notifier;
+};
+
+static struct rave_sp_wdt *to_rave_sp_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct rave_sp_wdt, wdd);
+}
+
+static int rave_sp_wdt_exec(struct watchdog_device *wdd, void *data,
+ size_t data_size)
+{
+ return rave_sp_exec(to_rave_sp_wdt(wdd)->sp,
+ data, data_size, NULL, 0);
+}
+
+static int rave_sp_wdt_legacy_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = 0,
+ [3] = on,
+ [4] = on ? wdd->timeout : 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = on,
+ [3] = (u8)wdd->timeout,
+ [4] = (u8)(wdd->timeout >> 8),
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
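The two configure variants frame the request differently: the legacy protocol carries the enable flag in byte 3 and an 8-bit timeout in byte 4, while the RDU protocol carries the enable flag in byte 2 and a 16-bit little-endian timeout in bytes 3-4. A worked example (the timeout value is chosen purely for illustration): enabling the RDU watchdog with wdd->timeout = 180 yields the frame

	u8 cmd[] = { RAVE_SP_CMD_SW_WDT, 0, 1, 0xb4, 0x00 };	/* 180 == 0x00b4 */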
+/**
+ * rave_sp_wdt_configure - Configure watchdog device
+ *
+ * @wdd: Device to configure
+ * @on: Desired state of the watchdog timer (ON/OFF)
+ *
+ * This function configures two aspects of the watchdog timer:
+ *
+ * - Whether it is ON or OFF
+ * - Its timeout duration
+ *
+ * with the first aspect specified via the function argument and the
+ * second via the value of 'wdd->timeout'.
+ */
+static int rave_sp_wdt_configure(struct watchdog_device *wdd, bool on)
+{
+ return to_rave_sp_wdt(wdd)->variant->configure(wdd, on);
+}
+
+static int rave_sp_wdt_legacy_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE,
+ [3] = RAVE_SP_RESET_REASON_NORMAL
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /*
+ * The restart handler is called in atomic context, which means we
+ * can't communicate with the SP over UART from there. Luckily for
+ * us, the SP will wait 500 ms before actually resetting us, so we
+ * ask it to do so here and let the rest of the system go on
+ * wrapping things up.
+ */
+ if (action == SYS_DOWN || action == SYS_HALT) {
+ struct rave_sp_wdt *sp_wd =
+ container_of(nb, struct rave_sp_wdt, reboot_notifier);
+
+ const int ret = sp_wd->variant->restart(&sp_wd->wdd);
+
+ if (ret < 0)
+ dev_err(sp_wd->wdd.parent,
+ "Failed to issue restart command (%d)", ret);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rave_sp_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ /*
+ * The actual work was done by the reboot notifier above. The SP
+ * firmware waits 500 ms before issuing the reset, so let's hang
+ * here for twice that delay and hopefully never reach the
+ * return statement.
+ */
+ mdelay(2 * RAVE_SP_RESET_DELAY_MS);
+
+ return -EIO;
+}
+
+static int rave_sp_wdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+
+ ret = rave_sp_wdt_configure(wdd, true);
+ if (!ret)
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+ return ret;
+}
+
+static int rave_sp_wdt_stop(struct watchdog_device *wdd)
+{
+ return rave_sp_wdt_configure(wdd, false);
+}
+
+static int rave_sp_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ return rave_sp_wdt_configure(wdd, watchdog_active(wdd));
+}
+
+static int rave_sp_wdt_ping(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_PET_WDT,
+ [1] = 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static const struct watchdog_info rave_sp_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "RAVE SP Watchdog",
+};
+
+static const struct watchdog_ops rave_sp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rave_sp_wdt_start,
+ .stop = rave_sp_wdt_stop,
+ .ping = rave_sp_wdt_ping,
+ .set_timeout = rave_sp_wdt_set_timeout,
+ .restart = rave_sp_wdt_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_legacy = {
+ .max_timeout = 255,
+ .min_timeout = 1,
+ .configure = rave_sp_wdt_legacy_configure,
+ .restart = rave_sp_wdt_legacy_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_rdu = {
+ .max_timeout = 180,
+ .min_timeout = 60,
+ .configure = rave_sp_wdt_rdu_configure,
+ .restart = rave_sp_wdt_rdu_restart,
+};
+
+static const struct of_device_id rave_sp_wdt_of_match[] = {
+ {
+ .compatible = "zii,rave-sp-watchdog-legacy",
+ .data = &rave_sp_wdt_legacy,
+ },
+ {
+ .compatible = "zii,rave-sp-watchdog",
+ .data = &rave_sp_wdt_rdu,
+ },
+ { /* sentinel */ }
+};
+
+static int rave_sp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+ struct rave_sp_wdt *sp_wd;
+ struct nvmem_cell *cell;
+ __le16 timeout = 0;
+ int ret;
+
+ sp_wd = devm_kzalloc(dev, sizeof(*sp_wd), GFP_KERNEL);
+ if (!sp_wd)
+ return -ENOMEM;
+
+ sp_wd->variant = of_device_get_match_data(dev);
+ sp_wd->sp = dev_get_drvdata(dev->parent);
+
+ wdd = &sp_wd->wdd;
+ wdd->parent = dev;
+ wdd->info = &rave_sp_wdt_info;
+ wdd->ops = &rave_sp_wdt_ops;
+ wdd->min_timeout = sp_wd->variant->min_timeout;
+ wdd->max_timeout = sp_wd->variant->max_timeout;
+ wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+ wdd->timeout = 60;
+
+ cell = nvmem_cell_get(dev, "wdt-timeout");
+ if (!IS_ERR(cell)) {
+ size_t len;
+ void *value = nvmem_cell_read(cell, &len);
+
+ if (!IS_ERR(value)) {
+ memcpy(&timeout, value, min(len, sizeof(timeout)));
+ kfree(value);
+ }
+ nvmem_cell_put(cell);
+ }
+ watchdog_init_timeout(wdd, le16_to_cpu(timeout), dev);
+ watchdog_set_restart_priority(wdd, 255);
+ watchdog_stop_on_unregister(wdd);
+
+ sp_wd->reboot_notifier.notifier_call = rave_sp_wdt_reboot_notifier;
+ ret = devm_register_reboot_notifier(dev, &sp_wd->reboot_notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register reboot notifier\n");
+ return ret;
+ }
+
+ /*
+ * We don't know if the watchdog is running now. To be sure,
+ * let's start it and depend on the watchdog core to ping it.
+ */
+ wdd->max_hw_heartbeat_ms = wdd->max_timeout * 1000;
+ ret = rave_sp_wdt_start(wdd);
+ if (ret) {
+ dev_err(dev, "Watchdog didn't start\n");
+ return ret;
+ }
+
+ ret = devm_watchdog_register_device(dev, wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device\n");
+ rave_sp_wdt_stop(wdd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver rave_sp_wdt_driver = {
+ .probe = rave_sp_wdt_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rave_sp_wdt_of_match,
+ },
+};
+
+module_platform_driver(rave_sp_wdt_driver);
+
+MODULE_DEVICE_TABLE(of, rave_sp_wdt_of_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP Watchdog driver");
+MODULE_ALIAS("platform:rave-sp-watchdog");
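Because rave_sp_wdt_start() sets WDOG_HW_RUNNING, starting the timer before devm_watchdog_register_device() tells the watchdog core that the hardware is already live, and the core keeps pinging it until userspace takes over. From there the driver is exercised through the standard watchdog chardev interface; a minimal userspace sketch follows (the device node name is an assumption, and note that WATCHDOG_NOWAYOUT_INIT_STATUS above makes the magic close a no-op when nowayout is configured in):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 60;
		int fd = open("/dev/watchdog0", O_WRONLY);	/* node name assumed */

		if (fd < 0)
			return 1;

		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* -> rave_sp_wdt_set_timeout() */
		ioctl(fd, WDIOC_KEEPALIVE, 0);		/* -> rave_sp_wdt_ping() */

		write(fd, "V", 1);	/* magic close, see WDIOF_MAGICCLOSE */
		close(fd);
		return 0;
	}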
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 9729a64ea1a9..72c0416a01cc 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -621,9 +621,9 @@ static long evtchn_ioctl(struct file *file,
return rc;
}
-static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
struct per_user_data *u = file->private_data;
poll_wait(file, &u->evtchn_wait, wait);
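The remaining hunks are part of the tree-wide switch of poll methods from unsigned int to __poll_t. __poll_t is a __bitwise-annotated type, so sparse can flag code that mixes poll masks with ordinary integers or returns a mask where a plain int is expected. A minimal sketch of the converted signature shape (all names here are assumed, not from these hunks):

	static __poll_t foo_poll(struct file *file, poll_table *wait)
	{
		struct foo_dev *dev = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &dev->waitq, wait);
		if (foo_data_ready(dev))
			mask |= POLLIN | POLLRDNORM;

		return mask;
	}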
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index 6cc1c15bcd84..9ade533d9e40 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -139,7 +139,7 @@ out:
return err ? err : buf - ubuf;
}
-static unsigned int xen_mce_chrdev_poll(struct file *file, poll_table *wait)
+static __poll_t xen_mce_chrdev_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &xen_mce_chrdev_wait, wait);
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 4c789e61554b..78804e71f9a6 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -878,7 +878,7 @@ received:
return ret;
}
-static unsigned int pvcalls_front_poll_passive(struct file *file,
+static __poll_t pvcalls_front_poll_passive(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
@@ -935,12 +935,12 @@ static unsigned int pvcalls_front_poll_passive(struct file *file,
return 0;
}
-static unsigned int pvcalls_front_poll_active(struct file *file,
+static __poll_t pvcalls_front_poll_active(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int32_t in_error, out_error;
struct pvcalls_data_intf *intf = map->active.ring;
@@ -958,12 +958,12 @@ static unsigned int pvcalls_front_poll_active(struct file *file,
return mask;
}
-unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
+__poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
- int ret;
+ __poll_t ret;
pvcalls_enter();
if (!pvcalls_front_dev) {
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 82fc54f8eb77..5bb72d3f8337 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -36,7 +36,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/bootmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index f3b089b7c0b6..e17ec3fce590 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -645,7 +645,7 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
+static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
struct xenbus_file_priv *u = file->private_data;